commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
f03f976696077db4146ea78e0d0b1ef5767f00ca | Add high level signing capabilities | tests/unit/test_sign.py | tests/unit/test_sign.py | # Import libnacl libs
import libnacl.sign
# Import pythonlibs
import unittest
class TestSigning(unittest.TestCase):
'''
'''
def test_sign(self):
msg = ('Well, that\'s no ordinary rabbit. That\'s the most foul, '
'cruel, and bad-tempered rodent you ever set eyes on.')
signer = libnacl.sign.Signer()
signed = signer.sign(msg)
self.assertNotEqual(msg, signed)
veri = libnacl.sign.Verifier(signer.hex_vk())
verified = veri.verify(signed)
self.assertEqual(verified, msg)
| Python | 0 | |
f6609763f832cd5672e40d1dfe8f7dc7c58ca7c5 | Create diarygui.py | _src/om2py2w/2wex0/diarygui.py | _src/om2py2w/2wex0/diarygui.py | # -*- coding: utf-8 -*-
# ------------2w task:simple diary GUI-----------
# --------------created by bambooom--------------
from Tkinter import * # import Tkinter module
from ScrolledText import * # ScrolledText module = Text Widget + scrollbar
global newlog
class Application(Frame): # 基本框架
def __init__(self, master=None):
Frame.__init__(self, master)
self.pack()
self.createWidgets()
def createWidgets(self): # 组件
newlog = StringVar()
l = Label(self, text = "Input here: ") # Label Widget 提示输入
l.grid(row = 0, column = 0, sticky = W)
e = Entry(self,textvariable=newlog,width=80) # Entry box 输入框
e.grid(row = 0, column = 1, sticky = W)
t = ScrolledText(self) # ScrolledText 打印出文档的框
t.grid(columnspan = 2, sticky = W)
b = Button(self, text="QUIT", fg="red", command=self.quit) # 退出的button
b.grid(row = 2, column = 0, sticky = W)
root = Tk()
root.title('MyDiary Application')
app = Application(root)
# 主消息循环:
app.mainloop()
| Python | 0 | |
c43c7d523ddbb5b914748a20d55971fbf1c12496 | Create oauth2token.py | oauth2token.py | oauth2token.py | #!/usr/bin/python
'''
This script will attempt to open your webbrowser,
perform OAuth 2 authentication and print your access token.
It depends on two libraries: oauth2client and gflags.
To install dependencies from PyPI:
$ pip install python-gflags oauth2client
Then run this script:
$ python get_oauth2_token.py
This is a combination of snippets from:
https://developers.google.com/api-client-library/python/guide/aaa_oauth
'''
import sys
sys.path.append('/usr/lib/python2.7/dist-packages')
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
from oauth2client.file import Storage
CLIENT_ID = '411103951529-nf611s2285n12mmqrkigq3ckgkac1gmv.apps.googleusercontent.com'
CLIENT_SECRET = 'uDKCenlmvo1desQfylHIUnYr'
flow = OAuth2WebServerFlow(client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
scope='https://spreadsheets.google.com/feeds https://docs.google.com/feeds',
redirect_uri='http://example.com/auth_return')
storage = Storage('creds.data')
credentials = run(flow, storage)
print ("access_token: %s") % credentials.access_token
| Python | 0.000016 | |
2de7f3d382c935d6b9036de58466ea03ab58c8fd | Add script to run registration task remotely. | bin/mujin_controllerclientpy_runregistrationtask.py | bin/mujin_controllerclientpy_runregistrationtask.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import time
import datetime
import argparse
from mujincontrollerclient.controllerclientbase import ControllerClient
from mujincontrollerclient import uriutils
import logging
log = logging.getLogger(__name__)
def _RunMain():
parser = argparse.ArgumentParser(description='Run registration task on remote controller')
parser.add_argument('--logLevel', action='store', type=str, dest='logLevel', default='INFO', help='the python log level, e.g. DEBUG, VERBOSE, ERROR, INFO, WARNING, CRITICAL [default=%(default)s]')
parser.add_argument('--controllerUrl', action='store', type=str, dest='controllerUrl', default='http://localhost', help='controller url e.g http://controller123 [default=%(default)s]')
parser.add_argument('--controllerUsername', action='store', type=str, dest='controllerUsername', default='mujin', help='controller username [default=%(default)s]')
parser.add_argument('--controllerPassword', action='store', type=str, dest='controllerPassword', default='mujin', help='controller password [default=%(default)s]')
parser.add_argument('--scenepk', action='store', type=str, dest='scenepk', default=None, help='scene primary key, if not specified, will determine from remote system [default=%(default)s]')
parser.add_argument('--ftpHost', action='store', type=str, dest='ftpHost', default='127.0.0.1', help='ftp server hostname or ip address [default=%(default)s]')
parser.add_argument('--ftpPort', action='store', type=int, dest='ftpPort', default=21, help='ftp server port [default=%(default)r]')
parser.add_argument('--ftpUsername', action='store', type=str, dest='ftpUsername', default='anonymous', help='ftp username [default=%(default)s]')
parser.add_argument('--ftpPassword', action='store', type=str, dest='ftpPassword', default='', help='ftp password [default=%(default)s]')
parser.add_argument('--ftpPath', action='store', type=str, dest='ftpPath', default='', help='path on ftp server, if not supplied, will use home directory of the user [default=%(default)s]')
parser.add_argument('--syncMasterFile', action='store', type=str, dest='syncMasterFile', default=None, help='if supplied, will sync this master file on FTP, e.g. /somewhere/masterfile.txt [default=%(default)s]')
parser.add_argument('--backup', action='store_true', dest='backup', default=False, help='backup registration objects to ftp [default=%(default)s]')
parser.add_argument('--outputFilename', action='store', type=str, dest='outputFilename', default=None, help='If supplied, will output to file specified, otherwise file will be named after task name [default=%(default)s]')
options = parser.parse_args()
# configure logging
try:
from mujincommon import ConfigureRootLogger
ConfigureRootLogger(level=options.logLevel)
except ImportError:
logging.basicConfig(format='%(asctime)s %(name)s [%(levelname)s] [%(filename)s:%(lineno)s %(funcName)s] %(message)s', level=options.logLevel)
taskType = 'registration'
command = None
if options.syncMasterFile:
command = 'SyncMasterFile'
elif options.backup:
command = 'Backup'
else:
raise Exception('Have to sepecify either --syncMasterFile or --backup')
taskName = 'registration-%s-%s' % (command.lower(), datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
controllerclient = ControllerClient(options.controllerUrl, options.controllerUsername, options.controllerPassword)
controllerclient.Ping()
# cancel previous jobs
for job in controllerclient.GetJobs():
if '/registration-' in job['description']:
controllerclient.DeleteJob(job['pk'])
# determine scenepk
if options.scenepk is None:
options.scenepk = uriutils.GetPrimaryKeyFromURI(controllerclient.GetConfig()['sceneuri'])
# delete previous task
for task in controllerclient.GetSceneTasks(options.scenepk):
if task['tasktype'] == taskType:
controllerclient.DeleteSceneTask(options.scenepk, task['pk'])
# create task
task = controllerclient.CreateSceneTask(options.scenepk, {
'tasktype': taskType,
'name': taskName,
'taskparameters': {
'command': command,
'fileStorageInfo': {
'type': 'ftp',
'username': options.ftpUsername,
'password': options.ftpPassword,
'host': options.ftpHost,
'port': options.ftpPort,
'remotePath': options.ftpPath,
},
'remoteMasterFilePath': options.syncMasterFile,
}
})
taskpk = task['pk']
log.info('task created: %s: %s', taskpk, task['name'])
# run task async
jobpk = controllerclient._webclient.APICall('POST', 'job/', data={
'scenepk': options.scenepk,
'target_pk': taskpk,
'resource_type': 'task',
}, expectedStatusCode=200)['jobpk']
log.info('job started: %s', jobpk)
# wait for job
startTime = time.time()
jobProgress = None
while True:
job = ([j for j in controllerclient.GetJobs() if j['pk'] == jobpk] or [None])[0]
if job is None:
if jobProgress is not None:
# job has been seen before, so must be done now
break
if time.time() - startTime > 2.0:
# perhaps job finished too quickly
break
# wait a little bit and check for job again
time.sleep(0.05)
continue
newProgress = (
min(1.0, max(jobProgress[0] if jobProgress else 0.0, float(job['progress']))),
job['status'],
job['status_text']
)
if newProgress != jobProgress:
jobProgress = newProgress
log.info('progress %.02f%%: %s: %s', jobProgress[0] * 100.0, jobProgress[1], jobProgress[2])
if job['status'] in ('succeeded', 'aborted'):
break
if job['status'] in ('lost', 'preempted'):
raise Exception('Job has stopped unexpectedly: %s' % job['status'])
time.sleep(0.5)
# wait for result
result = None
startTime = time.time()
while True:
task = controllerclient.GetSceneTask(options.scenepk, taskpk)
if len(task['binpickingresults']) > 0:
result = controllerclient.GetBinpickingResult(task['binpickingresults'][0]['pk'])
break
if time.time() - startTime > 5.0:
raise Exception('Timed out waiting for task result')
# write result
if not options.outputFilename:
options.outputFilename = '%s.json' % taskName
with open(options.outputFilename, 'w') as f:
json.dump(result, f, ensure_ascii=False, indent=2, separators=(',', ': '), sort_keys=True)
f.write('\n')
log.info('result written to: %s', options.outputFilename)
if __name__ == '__main__':
_RunMain()
| Python | 0 | |
31cdb65a8d370c6f309ad610aa3b969d5bfb8706 | Add follow_bot.py | follow_bot.py | follow_bot.py | """Follow bot, to follow some followers from an account
"""
__date__ = '08/01/2014'
__author__ = '@ismailsunni'
import tweepy
import constants
# constants
consumer_key = constants.consumer_key
consumer_secret = constants.consumer_secret
access_key = constants.access_key
access_secret = constants.access_secret
def need_to_follow(user):
statuses_count = user.statuses_count
followers_count = user.followers_count
friends_count = user.friends_count
created_at = user.created_at
# last_status_time = user.status.created_at
if followers_count > friends_count:
return True
else:
return False
def main():
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
# accounts = ['sarapanhaticom']
accounts = ['rischanmafrur']
for account in accounts:
followers = api.followers(account)
print followers
for follower in followers:
if need_to_follow(follower):
print follower.screen_name
try:
friend = api.create_friendship(follower.screen_name)
if friend.screen_name == follower.screen_name:
print 'Follow ' + follower.name + ' success'
else:
print 'Follow ' + follower.name + ' failed'
except tweepy.TweepError, e:
print e
print 'benar'
if __name__ == '__main__':
main() | Python | 0.00002 | |
91e84e0f47792b44a6cf8eddbb5fb1489879613e | Create WriteRecord.py | WriteRecord.py | WriteRecord.py | import json
class WriteRecord:
def create_data_file(self):
self.jsonrec = {
"ASXListedCompanies": {
"filename": "ASXListedCompanies.csv",
"source": "ASX",
"url": "http://www.asx.com.au/asx/research/ASXListedCompanies.csv",
"local_file": "/stock_market/data/DailyFiles/USA/ASXListedCompanies.csv",
"notes":
"Timestamp first format: 'ASX listed companies as at Sat Aug 13"
" 21:00:02 EST 2016' followed by several empty lines, followed"
" by header format: 'Company name,ASX code,GICS industry group'",
"delimiter": ",",
"numfields": 3,
"dbtablename": "ASXListed",
"dbtabledesc": "ASX - one of the world\u2019s leading financial market exchanges",
"columns": [
{
"field_name": "Company name",
"db_column_name": "CompanyName",
"db_column_desc": "The company name",
"db_column_type": "VARCHAR"
},
{
"field_name": "ASX code",
"db_column_name": "AsxSymbol",
"db_column_desc": "The ASX Code (symbol)",
"db_column_type": "VARCHAR"
},
{
"field_name": "GICS industry group",
"db_column_name": "GicsIndustryGroup",
"db_column_desc": "Name of Industry Group",
"db_column_type": "VARCHAR"
}
]
},
"nasdaqlisted": {
"filename": "nasdaqlisted.txt",
"source": "NASDAQ",
"url": "ftp://ftp.nasdaqtrader.com/symboldirectory/nasdaqlisted.txt",
"local_file": "\stock_market\data\DailyFiles\\USA\\nasdaqlisted.txt",
"notes":
"Order Must be maintained in header - Use Number key to access"
"The last row of each Symbol Directory text file contains a"
" timestamp that reports the File Creation Time. The file"
" creation time is based on when NASDAQ Trader generates the"
" file and can be used to determine the timeliness of the"
" associated data. The row contains the words File Creation Time"
" followed by mmddyyyyhhmm as the first field, followed by all"
" delimiters to round out the row. An example: File Creation"
" Time: 1217200717:03|||||"
"CreatedDate - 'File Creation Time: MMDDYYYYHR:MN']",
"delimiter": "|",
"numfields": 8,
"dbtablename": "NasdaqListed",
"dbtabledesc":
"ASX is one of the world’s leading financial market"
" exchanges, offering a full suite of services,"
" including listings, trading, clearing and settlement,"
" across a comprehensive range of asset classes. As the"
" first major financial market open every day, ASX is a"
" world leader in raising capital, consistently ranking"
" among the top five exchanges globally. With a total"
" market capitalisation of around $1.5 trillion, ASX is"
" home to some of the world’s leading resource, finance"
" and technology companies. Our $47 trillion interest rate"
" derivatives market is the largest in Asia and among the"
" biggest in the world.",
"columns": [
{
"field_name": "Symbol",
"db_column_name": "Symbol",
"db_column_desc":
"The one to four or five character identifier for each"
" NASDAQ-listed security.",
"db_column_type": "VARCHAR"
},
{
"field_name": "Security Name",
"db_column_name": "SecurityName",
"db_column_desc": "Company issuing the security.",
"db_column_type": "VARCHAR"
},
{
"field_name": "Market Category",
"db_column_name": "MarketCategory",
"db_column_desc": "The category assigned to the issue by NASDAQ based on Listing Requirements. Values",
"db_column_type": "VARCHAR"
},
{
"field_name": "Test Issue",
"db_column_name": "TestIssue",
"db_column_desc": "Indicates whether or not the security is a test security.",
"db_column_type": "VARCHAR"
},
{
"field_name": "Financial Status",
"db_column_name": "FinancialStatus",
"db_column_desc": "Indicates when an issuer has failed to submit its regulatory filings on a timely basis, has failed to meet NASDAQ's continuing listing standards, and/or has filed for bankruptcy.",
"db_column_type": "VARCHAR"
},
{
"field_name": "Round Lot Size",
"db_column_name": "RoundLotSize",
"db_column_desc": "Indicates the number of shares that make"
" up a round lot for the given security.",
"db_column_type": "NUMERIC"
},
{
"field_name": "ETF",
"db_column_name": "ETF",
"db_column_desc": "Identifies whether the security is an"
" exchange traded fund",
"db_column_type": "VARCHAR"
},
{
"field_name": "Next Shares",
"db_column_name": "NextSghares",
"db_column_desc": "",
"db_column_type": "VARCHAR"
}
]
}
}
with open('StockData.json', 'w') as f:
j = json.dumps(self.jsonrec)
f.write(j)
if __name__ == '__main__':
wd = WriteRecord()
wd.create_data_file()
print(wd.jsonrec)
| Python | 0.000001 | |
30f18a4be667b02f8d0f6c2f2bf97146992d3208 | Add first version of OpenCV core | opencv/core.py | opencv/core.py | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 18:19:38 2013
@author: matz
"""
import cvtype
import datatype
import document
import generator
import package
import test
# abbreviations
dt = test.Default()
# utilitiy functions
dcl = document.Document()
dcl.line("void checkEnumValue(const stromx::runtime::Enum & value, "
"const stromx::runtime::EnumParameter* param, "
"const stromx::runtime::OperatorKernel& op);")
dclIncludes = ["<stromx/runtime/Enum.h>",
"<stromx/runtime/EnumParameter.h>",
"<stromx/runtime/OperatorKernel.h>"]
dtn = document.Document()
dtn.line("void checkEnumValue(const stromx::runtime::Enum & value, "
"const stromx::runtime::EnumParameter* param, "
"const stromx::runtime::OperatorKernel& op)")
dtn.scopeEnter()
dtn.line("using namespace runtime;")
dtn.blank()
dtn.line("for(std::vector<EnumDescription>::const_iterator "
"iter = param->descriptions().begin(); iter != "
"param->descriptions().end(); ++iter)")
dtn.scopeEnter()
dtn.line(" if(value == iter->value())")
dtn.line("return;")
dtn.scopeExit()
dtn.line("throw stromx::runtime::WrongParameterValue(*param, op);")
dtn.scopeExit()
dtnIncludes = ["<stromx/runtime/OperatorException.h>"]
checkEnumValue = package.Function(dcl, dclIncludes, dtn, dtnIncludes)
dcl = document.Document()
dclIncludes = ["<stromx/runtime/NumericParameter.h>",
"<stromx/runtime/OperatorException.h>"]
dcl.line("template<class T>");
dcl.line("void checkNumericValue(const T & value, const "
"runtime::NumericParameter<T>* param, "
"const stromx::runtime::OperatorKernel& op)");
dcl.scopeEnter()
dcl.line("if(value < runtime::data_cast<T>(param->min()))")
dcl.increaseIndent()
dcl.line("throw runtime::WrongParameterValue(*param, op);")
dcl.decreaseIndent()
dcl.line("if(value > runtime::data_cast<T>(param->max()))")
dcl.increaseIndent()
dcl.line("throw runtime::WrongParameterValue(*param, op);")
dcl.decreaseIndent()
dcl.scopeExit()
checkNumericValue = package.Function(dcl, dclIncludes)
# initializations
initInCopy = document.Document((
"{1}->initializeImage({0}->width(), {0}->height(), {0}->stride(), "
"{1}->data(), {0}->pixelType());").format("srcCastedData", "dstCastedData"
))
initOutCopy = document.Document((
"{1}->initializeImage({1}->width(), {1}->height(), {1}->stride(), "
"{1}->data(), {0}->pixelType());").format("srcCastedData", "dstCastedData"
))
# arguments
srcImg1 = package.Argument(
"src1", "Source 1", cvtype.Mat(), datatype.Image()
)
srcImg2 = package.Argument(
"src2", "Source 2", cvtype.Mat(), datatype.Image()
)
dstImg = package.Argument(
"dst", "Destination", cvtype.Mat(), datatype.Image(), initIn = initInCopy,
initOut = initOutCopy
)
# test data
lenna = test.ImageFile("lenna.jpg")
memory = test.ImageBuffer(1000000)
# add
manual = package.Option(
"manual", "Manual",
[package.Input(srcImg1), package.Input(srcImg2), package.Output(dstImg)],
tests = [
[lenna, lenna, memory]
]
)
allocate = package.Option(
"allocate", "Allocate",
[package.Input(srcImg1), package.Input(srcImg2), package.Allocation(dstImg)],
tests = [
[lenna, lenna, dt]
]
)
add = package.Method(
"add", options = [manual, allocate]
)
core = package.Package(
"core", 0, 0, 1,
methods = [
add
],
functions = [
checkEnumValue,
checkNumericValue
],
testFiles = [
"lenna.jpg"
]
)
generator.generatePackageFiles(core) | Python | 0 | |
e26be1cdee6b40896e7ee5c2a894fba05fc58480 | Add traceview directory. | traceview/__init__.py | traceview/__init__.py | # -*- coding: utf-8 -*-
"""
TraceView API library
:copyright: (c) 2014 by Daniel Riti.
:license: MIT, see LICENSE for more details.
"""
__title__ = 'traceview'
__version__ = '0.1.0'
__author__ = 'Daniel Riti'
__license__ = 'MIT'
| Python | 0 | |
b28d2933ac1b5c6375f9dd5142f467a06bd69463 | add a simple plot script to visualize the distribution | Utils/py/BallDetection/Evaluation/plot_csv.py | Utils/py/BallDetection/Evaluation/plot_csv.py | import matplotlib.pyplot as plt
import sys
import numpy as np
scores = np.genfromtxt(sys.argv[1], usecols=(1), skip_header=1, delimiter=",")
scores = np.sort(scores)
plt.style.use('seaborn')
plt.plot(scores)
plt.show() | Python | 0 | |
6d910181758008d05de3917fdac5b35b34188a8e | add RebootNodeWithPCU call. fails gracefully if dependencies are not met. | PLC/Methods/RebootNodeWithPCU.py | PLC/Methods/RebootNodeWithPCU.py | import socket
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Nodes import Node, Nodes
from PLC.NodeNetworks import NodeNetwork, NodeNetworks
from PLC.Auth import Auth
from PLC.POD import udp_pod
try:
from pcucontrol import reboot
external_dependency = True
except:
external_dependency = False
class RebootNodeWithPCU(Method):
"""
Uses the associated PCU to attempt to reboot the given Node.
Admins can reboot any node. Techs and PIs can only reboot nodes at
their site.
Returns 1 if the reboot proceeded without error (Note: this does not guarantee
that the reboot is successful).
Returns -1 if external dependencies for this call are not available.
Returns "error string" if the reboot failed with a specific message.
"""
roles = ['admin', 'pi', 'tech']
accepts = [
Auth(),
Mixed(Node.fields['node_id'],
Node.fields['hostname'])
]
returns = Parameter(int, '1 if successful')
def call(self, auth, node_id_or_hostname):
# Get account information
nodes = Nodes(self.api, [node_id_or_hostname])
if not nodes:
raise PLCInvalidArgument, "No such node"
node = nodes[0]
# Authenticated function
assert self.caller is not None
# If we are not an admin, make sure that the caller is a
# member of the site at which the node is located.
if 'admin' not in self.caller['roles']:
if node['site_id'] not in self.caller['site_ids']:
raise PLCPermissionDenied, "Not allowed to reboot nodes from specified site"
# Verify that the node has pcus associated with it.
pcus = PCUs(self.api, {'pcu_id' : node['pcu_ids']} )
if not pcus:
raise PLCInvalidArgument, "No PCUs associated with Node"
pcu = pcus[0]
if not external_dependency:
raise PLCNotImplemented, "Could not load external module to attempt reboot"
# model, hostname, port,
# i = pcu['node_ids'].index(node['node_id'])
# p = pcu['ports'][i]
ret = reboot.reboot_api(node, pcu)
self.event_objects = {'Node': [node['node_id']]}
self.message = "RebootNodeWithPCU called"
return ret
| Python | 0 | |
f75e1397735adcbd39dbc90a0446b9efd9532be4 | add initial python script to handle button events that trigger the node process | bin/selfie.py | bin/selfie.py | #!/usr/bin/python
import RPi.GPIO as GPIO
import time
from subprocess import call
GPIO.setmode(GPIO.BCM)
BUTTON = 18;
GPIO.setup(BUTTON, GPIO.IN, pull_up_down=GPIO.PUD_UP)
while True:
input_state = GPIO.input(BUTTON)
if input_state == False:
print('Button Pressed')
call(["node", "./index.js"])
time.sleep(1)
| Python | 0 | |
fb83969c6467e288ff16661aec2eafc174bdf124 | correct fieldsight form issue fix | onadata/apps/fsforms/management/commands/set_correct_fxf_in_finstance.py | onadata/apps/fsforms/management/commands/set_correct_fxf_in_finstance.py | from django.db import transaction
from django.core.management.base import BaseCommand
from onadata.apps.fieldsight.models import Site
from onadata.apps.fsforms.models import FieldSightXF, FInstance
from onadata.apps.viewer.models.parsed_instance import update_mongo_instance
class Command(BaseCommand):
help = 'Deploy Stages'
def handle(self, *args, **options):
organization_id = 13
# project_id = 30
sites = Site.objects.filter(project__organization__id=organization_id).values_list('id', flat=True)
for site_id in sites:
# self.stdout.write('Operating in site '+str(site_id))
with transaction.atomic():
finstances = FInstance.objects.filter(site_id=site_id, site_fxf_id__isnull=False)
for fi in finstances:
site_fsxf = fi.site_fxf
if site_fsxf.site.id != site_id:
correct_form = FieldSightXF.objects.get(site__id=site_id, is_staged=True, fsform=fi.project_fxf)
fi.site_fxf = correct_form
fi.save()
parsed_instance = fi.instance.parsed_instance
d = parsed_instance.to_dict_for_mongo()
d.update({'fs_uuid': correct_form.id})
update_mongo_instance(d)
self.stdout.write('Successfully corrected form')
| Python | 0 | |
0f76875400ea1a03a23a4b266eb0ca9bf574922d | implement 9 (9) 各行を2コラム目,1コラム目の優先順位で辞書の逆順ソートしたもの(注意: 各行の内容は変更せずに並び替えよ).確認にはsortコマンドを用いよ(この問題は結果が合わなくてもよい). | set01/09.py | set01/09.py | # -*- coding: utf-8 -*-
# (9) 各行を2コラム目,1コラム目の優先順位で辞書の逆順ソートしたもの(注意: 各行の内容は変更せずに並び替えよ).確認にはsortコマンドを用いよ(この問題は結果が合わなくてもよい).
import sys
lines = [line.decode('utf-8').rstrip(u'\r\n') for line in sys.stdin.readlines()]
lines = sorted(lines, key = lambda l: l.split(u'\t')[0])
lines = sorted(lines, key = lambda l: l.split(u'\t')[1])
for line in lines:
print line.encode('utf-8')
| Python | 0.000001 | |
d654bf0fb0c5e3fc7a11029a216c109b5f04d37b | Add __init__ file | taxdata/cps/__init__.py | taxdata/cps/__init__.py | # flake8: noqa
from taxdata.cps import benefits
from taxdata.cps import cps_meta
from taxdata.cps import cpsmar
from taxdata.cps.create import create
from taxdata.cps.finalprep import finalprep
from taxdata.cps import helpers
from taxdata.cps import impute
from taxdata.cps import pycps
from taxdata.cps import splitincome
from taxdata.cps import targeting
from taxdata.cps import taxunit
from taxdata.cps import validation
from taxdata.cps import constants
| Python | 0.00026 | |
1fddb845ad99bb65aa7b86155d899043a64ebdcf | Update app/views/main/views.py | app/views/main/views.py | app/views/main/views.py | from flask import current_app as app
from flask import flash
from flask import redirect
from flask import render_template
from flask import url_for
from flask_login import current_user
from flask_login import login_required
from . import main
from .forms import SearchForm
from ..api.utils import _search
@main.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@main.route('/dashboard', methods=['GET'])
@login_required
def dashboard():
bio = app.db.bios.count()
payroll = app.db.payrolls.count()
work = app.db.work_histories.count()
context = {
'counter': {
'Bio': bio,
'Payrolls': payroll,
'Work Histories': work,
'Mortgages': 0,
'Rents': 0,
'Utilities': 0,
'Loans': 0,
'Education Histories': 0
},
'total_records': bio + payroll + work
}
context.update(labels=list(context['counter'].keys()),
values=list(context['counter'].values()))
return render_template('main/dashboard.html', **context)
@main.route('/search', methods=['GET', 'POST'])
@login_required
def search():
context = {}
form = SearchForm()
if form.validate_on_submit():
bvn = form.bvn.data
context.update(bvn=form.bvn.data)
result = _search(bvn, app)
if result.get('status') == 'error':
flash(result.get('message'), 'error')
context.update(enrollee=result)
else:
for error in form.errors.values():
if isinstance(error, list):
for e in error:
flash(e, 'error')
else:
flash(error, 'error')
return render_template('search/results.html', **context)
| Python | 0 | |
9243f9264b0cc54cfe9a59be3f4435b2cd009875 | add analysis functions | scarce/analysis.py | scarce/analysis.py | r"""Helper functions for complex analysis """
import numpy as np
from scipy import integrate
from scarce import solver
def get_charge_planar(width, thickness, pot_descr, pot_w_descr, t_e_trapping=0., t_h_trapping=0., grid_x=5, grid_y=5, n_pairs=10, dt=0.001, n_steps=25000, temperature=300):
''' Calculate the collected charge in one planar pixel
Charge is given as a 2d map depending on the start postitions of the e-h pairs.
Parameters
----------
width: number
Pixel width in um
thickness: number
Pixel thickness in um
pot_descr, pot_w_descr: scarce.fields.Description
Solution for the drift/weightning potential.
grid_x, grid_y: number
Grid spacing in um
n_pairs: number
of pseudo e-h pairs per grid point
dt: float
Time step in simulation in ns. Should be 1 ps to give reasonable diffusion
n_steps: int
Time steps to simulate
'''
# Number of x/y bins
x_bins = int(width / grid_x)
y_bins = int(thickness / grid_y)
# Bin positions
range_x = (-width / 2., width / 2.)
range_y = (0, thickness)
# Create e-h pairs in the pixel, avoid charge carriers on boundaries
# e.g. x = -width / 2 or y = 0
xx, yy = np.meshgrid(np.linspace(range_x[0] + grid_x / 2.,
range_x[1] - grid_x / 2., x_bins),
np.repeat(np.linspace(range_y[0] + grid_y / 2.,
range_y[1] - grid_y / 2., y_bins),
n_pairs), # 10 e-h per position
sparse=False) # All combinations of x / y
# Start positions
p0 = np.array([xx.ravel(), yy.ravel()]) # Position [um]
# Initial charge set to 1
q_start = 1.
q0 = np.ones(p0.shape[1]) * q_start
# Needed for histograming, numerical accuracy demands > 1
q_max = q_start * 1.05
t = np.linspace(0, n_steps * dt, 1000)
dd = solver.DriftDiffusionSolver(pot_descr, pot_w_descr,
T=temperature, diffusion=True,
t_e_trapping=t_e_trapping, t_h_trapping=t_h_trapping, save_frac=50)
traj_e, traj_h, I_ind_e, I_ind_h, T, _, Q_ind_e_tot, Q_ind_h_tot = dd.solve(p0, q0, dt, n_steps,
multicore=True)
# Trajectory at t=0 is start position
pos_0 = traj_e[0]
# Interpolate data to fixed time points for easier plotting
# I_ind_e = tools.time_data_interpolate(T, I_ind_e, t, axis=0, fill_value=0.)
# I_ind_h = tools.time_data_interpolate(T, I_ind_h, t, axis=0, fill_value=0.)
I_ind_e[np.isnan(I_ind_e)] = 0.
I_ind_h[np.isnan(I_ind_h)] = 0.
Q_ind_e = integrate.cumtrapz(I_ind_e, T, axis=0, initial=0)
Q_ind_h = integrate.cumtrapz(I_ind_h, T, axis=0, initial=0)
# Last index with data (time != nan)
# index = np.nanargmax(T, axis=0)
# y = np.indices(index.shape)
# Last recorded integrated charge is total induced charge
# q_ind = Q_ind_e[index, y][0] + Q_ind_h[index, y][0]
q_ind = Q_ind_e_tot + Q_ind_h_tot
# Histogram charge per start position
data = np.vstack((pos_0[0], pos_0[1], q_ind)).T
n_bins_c = 200 # Number of charge bins
H, edges = np.histogramdd(sample=data,
bins=(x_bins, y_bins, n_bins_c),
range=((range_x[0], range_x[1]),
(range_y[0], range_y[1]),
(0., q_max)))
# Result hist
charge_pos = np.zeros(shape=(x_bins, y_bins))
sel = (np.sum(H, axis=2) != 0)
weights = (edges[2][:-1] + edges[2][1:]) / 2.
charge_pos[sel] = np.average(H, axis=2,
weights=weights)[sel] * weights.sum() / np.sum(H, axis=2)[sel]
edges_x = (edges[0][:-1] + edges[0][1:]) / 2.
edges_y = (edges[1][:-1] + edges[1][1:]) / 2.
# for xi, yi in zip(*np.where(np.logical_and(charge_pos > 0.1,
# charge_pos < 0.9))
# ):
# print edges_x[xi], edges_y[yi], charge_pos[xi, yi]
# plt.clf()
# plt.bar(weights, H[xi, yi], width=np.diff(weights)[0])
# plt.show()
# plt.clf()
# sel = np.logical_and(pos_0[0] == edges_x[xi],
# pos_0[1] == edges_y[yi])
# plt.plot(T[:, sel], Q_ind_e[:, sel] + Q_ind_h[:, sel])
# for c in weights[H[xi, yi].astype(np.bool)]:
# plt.plot(plt.xlim(), [c, c])
# plt.show()
# plt.clf()
# plt.plot(
# T[:, sel], traj_e[:, 0, sel], '-.', linewidth=1, label='e_x')
# plt.plot(
# T[:, sel], traj_e[:, 1, sel], '--', linewidth=1, label='e_y')
# plt.plot(
# T[:, sel], traj_h[:, 0, sel], '-.', linewidth=1, label='h_x')
# plt.plot(
# T[:, sel], traj_h[:, 1, sel], '--', linewidth=1, label='h_y')
# plt.legend(loc=2)
# plt.show()
# break
return edges[0], edges[1], charge_pos.T
| Python | 0.000001 | |
938a9548b6503136b82fd248258df5f4e0523f8a | add sorting_algorithms.py | adv/sorting_algorithms.py | adv/sorting_algorithms.py | # Sorting Algorithms
import random
import time
my_list = range(10000)
random.shuffle(my_list)
#print sorted(my_list) #We have a way to sort information.
# But how did it do that?
###################################################################
# What does "efficiency" mean in terms of a program?
# 1. Running time. Does it take a really long time to run?
# 2. Resources. (Memory, Power)
# 3. Lines of code
# 4. Manpower
def is_sorted(lst):
if len(lst) <= 1:
return True
else:
return lst[0] <= lst[1] and is_sorted(lst[1:])
def stupid_sort(lst):
while not is_sorted(lst):
random.shuffle(lst)
return lst
def dumb_sort(lst):
number_list= [None] * 10000000
for number in lst:
number_list[number] = number
sorted_list = []
for thing in number_list:
if thing:
sorted_list.append(thing)
return sorted_list
def insertion_sort(lst):
new_list = [lst[0]]
for element in lst[1:]:
for index, new_element in enumerate(new_list):
if element <= new_element:
new_list.insert(index, element)
found = True
break
else:
new_list.append(element)
return new_list
def selection_sort(lst):
new_list = []
length = len(lst)
while len(new_list) != length:
element = min(lst)
lst.remove(element)
new_list.append(element)
return new_list
def merge(left, right):
new_list = []
while len(left) > 0 and len(right) > 0:
if left[0] <= right[0]:
new_list.append(left.pop(0))
else:
new_list.append(right.pop(0))
return new_list + left + right
def merge_sort(lst):
if len(lst) <= 1:
return lst
else:
middle = len(lst) / 2
return merge(merge_sort(lst[:middle]), merge_sort(lst[middle:]))
start = time.time()
answer = merge_sort(my_list)
end = time.time()
print 'It took {} seconds!'.format(end-start)
| Python | 0.003966 | |
bb7031385af7931f9e12a8987375f929bcfb6b5a | Create script that checks for dev and docs dependencies. | scripts/devdeps.py | scripts/devdeps.py | from __future__ import print_function
import sys
try:
import colorama
def blue(text): return "%s%s%s" % (colorama.Fore.BLUE, text, colorama.Style.RESET_ALL)
def red(text): return "%s%s%s" % (colorama.Fore.RED, text, colorama.Style.RESET_ALL)
except ImportError:
def blue(text) : return text
def red(text) : return text
def depend_check(deps_name, *args):
"""Check for missing dependencies
"""
found = True
missing = []
for dependency in args:
try:
__import__(dependency)
except ImportError as e:
missing.append(dependency)
found = False
print('-'*80)
if not found:
print(red("You are missing the following %s dependencies:") % deps_name)
for dep in missing:
name = pkg_info_dict.get(dep, dep)
print(" * ", name)
print()
return False
else:
print(blue("All %s dependencies installed! You are good to go!\n") % deps_name)
return True
if __name__ == '__main__':
#Dictionary maps module names to package names
pkg_info_dict = {'bs4' : 'beautiful-soup',
'websocket' : 'websocket-client',
'sphinx_bootstrap_theme' : 'sphinx-bootstrap-theme',
'sphinxcontrib.httpdomain' : 'sphinxcontrib-httpdomain',
'pdiffer' : 'pdiff'
}
dev_deps = ['bs4', 'colorama', 'pdiffer', 'boto', 'nose', 'mock', 'coverage',
'websocket']
depend_check('Dev', *dev_deps)
docs_deps = ['graphviz', 'sphinx', 'pygments', 'sphinx_bootstrap_theme',
'sphinxcontrib.httpdomain']
depend_check('Docs', *docs_deps)
| Python | 0 | |
a23e08275652f7356863edada51e7dee345a2dfc | Add functools from Python trunk r65615 | test-tools/functools.py | test-tools/functools.py | """functools.py - Tools for working with functions and callable objects
"""
# Python module wrapper for _functools C module
# to allow utilities written in Python to be added
# to the functools module.
# Written by Nick Coghlan <ncoghlan at gmail.com>
# Copyright (C) 2006 Python Software Foundation.
# See C source code for _functools credits/copyright
from _functools import partial, reduce
# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Update a wrapper function to look like the wrapped function
wrapper is the function to be updated
wrapped is the original function
assigned is a tuple naming the attributes assigned directly
from the wrapped function to the wrapper function (defaults to
functools.WRAPPER_ASSIGNMENTS)
updated is a tuple naming the attributes of the wrapper that
are updated with the corresponding attribute from the wrapped
function (defaults to functools.WRAPPER_UPDATES)
"""
for attr in assigned:
setattr(wrapper, attr, getattr(wrapped, attr))
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
def wraps(wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Decorator factory to apply update_wrapper() to a wrapper function
Returns a decorator that invokes update_wrapper() with the decorated
function as the wrapper argument and the arguments to wraps() as the
remaining arguments. Default arguments are as for update_wrapper().
This is a convenience function to simplify applying partial() to
update_wrapper().
"""
return partial(update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
| Python | 0 | |
d3a652111aa7df0a5ecc429db6aa639f9a667ff9 | Create imogen.py | imogen.py | imogen.py | Python | 0.000001 | ||
ca098b540b171460f41ea66c01d2b0d039feb073 | Add arrange combination algorithm | arrange_combination/arrange.py | arrange_combination/arrange.py | #!/usr/bin/env python
def range(input_list, step):
if step == 3:
print(input_list)
return
for i in range(step, len(input_list)):
input_list[step], input_list[i] = input_list[i], input_list[step]
range(input_list, step+1)
input_list[step], input_list[i] = input_list[i], input_list[step]
def main():
import ipdb;ipdb.set_trace()
input_list = ["a", "b", "c"]
range(input_list, 0)
if __name__ == "__main__":
main()
| Python | 0.000036 | |
c28522ace1efc0d2c7545bbc742356f6f6428812 | Use argparse in the radare module. | modules/radare.py | modules/radare.py | # -*- coding: utf-8 -*-
# This file is part of Viper - https://github.com/botherder/viper
# See the file 'LICENSE' for copying permission.
import os
import sys
import shlex
import subprocess
from viper.common.abstracts import Module
from viper.core.session import __sessions__
ext = ".bin"
run_radare = {'linux2': 'r2', 'darwin': 'r2',
'win32': 'r2'}
class Radare(Module):
cmd = 'r2'
description = 'Start Radare2'
authors = ['dukebarman']
def __init__(self):
super(Radare, self).__init__()
self.parser.add_argument('-w', '--webserver', action='store_true', help='Start web-frontend for radare2')
self.is_64b = False
self.ext = ''
self.server = ''
def open_radare(self, filename):
directory = filename + ".dir"
if not os.path.exists(directory):
os.makedirs(directory)
destination = directory + "/executable" + self.ext
if not os.path.lexists(destination):
os.link(filename, destination)
command_line = '{} {} {}'.format(run_radare[sys.platform], self.server, destination)
args = shlex.split(command_line)
subprocess.Popen(args)
def run(self):
super(Radare, self).run()
if self.parsed_args is None:
return
if not __sessions__.is_set():
self.log('error', "No session opened")
return
if self.parsed_args.webserver:
self.server = "-c=H"
filetype = __sessions__.current.file.type
if 'x86-64' in filetype:
self.is_64b = True
arch = '64' if self.is_64b else '32'
if 'DLL' in filetype:
self.ext = '.dll'
to_print = [arch, 'bit DLL (Windows)']
if "native" in filetype:
to_print.append('perhaps a driver (.sys)')
self.log('info', ' '.join(to_print))
elif 'PE32' in filetype:
self.ext = '.exe'
self.log('info', ' '.join([arch, 'bit executable (Windows)']))
elif 'shared object' in filetype:
self.ext = '.so'
self.log('info', ' '.join([arch, 'bit shared object (linux)']))
elif 'ELF' in filetype:
self.ext = ''
self.log('info', ' '.join([arch, 'bit executable (linux)']))
else:
self.log('error', "Unknown binary")
try:
self.open_radare(__sessions__.current.file.path)
except:
self.log('error', "Unable to start Radare2")
| # -*- coding: utf-8 -*-
# This file is part of Viper - https://github.com/botherder/viper
# See the file 'LICENSE' for copying permission.
import os
import sys
import getopt
from viper.common.out import *
from viper.common.abstracts import Module
from viper.core.session import __sessions__
ext = ".bin"
run_radare = {'linux2': 'r2', 'darwin': 'r2',
'win32': 'r2'}
class Radare(Module):
cmd = 'r2'
description = 'Start Radare2'
authors = ['dukebarman']
def __init__(self):
self.is_64b = False
self.ext = ''
self.server = ''
def open_radare(self, filename):
directory = filename + ".dir"
if not os.path.exists(directory):
os.makedirs(directory)
destination = directory + "/executable" + self.ext
if not os.path.lexists(destination):
os.link(filename, destination)
command_line = '{} {}{}'.format(run_radare[sys.platform], self.server, destination)
os.system(command_line)
def run(self):
if not __sessions__.is_set():
self.log('error', "No session opened")
return
def usage():
self.log('', "usage: r2 [-h] [-s]")
def help():
usage()
self.log('', "")
self.log('', "Options:")
self.log('', "\t--help (-h)\tShow this help message")
self.log('', "\t--webserver (-w)\tStart web-frontend for radare2")
self.log('', "")
try:
opts, argv = getopt.getopt(self.args[0:], 'hw', ['help', 'webserver'])
except getopt.GetoptError as e:
self.log('', e)
return
for opt, value in opts:
if opt in ('-h', '--help'):
help()
return
elif opt in ('-w', '--webserver'):
self.server = "-c=H "
filetype = __sessions__.current.file.type
if 'x86-64' in filetype:
self.is_64b = True
arch = '64' if self.is_64b else '32'
if 'DLL' in filetype:
self.ext = '.dll'
to_print = [arch, 'bit DLL (Windows)']
if "native" in filetype:
to_print.append('perhaps a driver (.sys)')
self.log('info', ' '.join(to_print))
elif 'PE32' in filetype:
self.ext = '.exe'
self.log('info', ' '.join([arch, 'bit executable (Windows)']))
elif 'shared object' in filetype:
self.ext = '.so'
self.log('info', ' '.join([arch, 'bit shared object (linux)']))
elif 'ELF' in filetype:
self.ext = ''
self.log('info', ' '.join([arch, 'bit executable (linux)']))
else:
self.log('error', "Unknown binary")
try:
self.open_radare(__sessions__.current.file.path)
except:
self.log('error', "Unable to start Radare2")
| Python | 0 |
f0da1774514c839b4b97fa92d2202437932dc99a | Add a small driver for plotting skeletons. | analysis/plot-skeleton.py | analysis/plot-skeleton.py | #!/usr/bin/env python
import climate
import database
import plots
@climate.annotate(
root='plot data rooted at this path',
pattern=('plot data from files matching this pattern', 'option'),
)
def main(root, pattern='*/*block02/*trial00*.csv.gz'):
with plots.space() as ax:
for trial in database.Experiment(root).trials_matching(pattern):
plots.skeleton(ax, trial, 100)
break
if __name__ == '__main__':
climate.call(main)
| Python | 0 | |
060c8a4379aef14459929a47bf62a80a3e7eef67 | Create af_setJoints.py | af_scripts/tmp/af_setJoints.py | af_scripts/tmp/af_setJoints.py | import pymel.core as pm
curSel = pm.ls(sl=True,type='transform')[0]
bBox = pm.xform(curSel,ws=1,q=1,bb=1)
sizeX = abs(bBox[0]-bBox[3])
sizeY = abs(bBox[1]-bBox[4])
sizeZ = abs(bBox[2]-bBox[5])
curPvt = [(bBox[0]+sizeX/2),(bBox[1]+sizeY/2),(bBox[2]+sizeZ/2)]
ccUD = pm.circle(n='circle_rotUpDown',r=sizeY/2,nr=(1,0,0))
pm.move(ccUD[0],curPvt)
ccLR = pm.circle(n='circle_rotLeftRight',r=sizeX/2,nr=(0,1,0))
pm.move(ccLR[0],curPvt)
pm.select(d=1)
pm.jointDisplayScale(0.1)
pm.joint(p=(0,bBox[1],bBox[2]),n='joint_base')
pm.joint(p=(pm.xform(ccUD,ws=1,q=1,rp=1)),n='joint_rotUpDown')
pm.joint(p=(pm.xform(ccLR,ws=1,q=1,rp=1)),n='joint_rotLeftRight')
| Python | 0.000001 | |
bde8b61f419dd6e66a85cc92f3661de6aaadeb94 | ADD CHECK FOR YELLING | proselint/checks/misc/yelling.py | proselint/checks/misc/yelling.py | # -*- coding: utf-8 -*-
"""EES: Too much yelling..
---
layout: post
error_code: SCH
source: ???
source_url: ???
title: yelling
date: 2014-06-10 12:31:19
categories: writing
---
Too much yelling.
"""
from proselint.tools import blacklist
err = "MAU103"
msg = u"Too much yelling."
check = blacklist(["[A-Z]+ [A-Z]+ [A-Z]+"], err, msg, ignore_case=False)
| Python | 0 | |
26e7e7b270bfd5e08cf871f7d89b5a92b07df230 | add migration file | contmon/scraper/migrations/0001_initial.py | contmon/scraper/migrations/0001_initial.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
replaces = [('scraper', '0001_initial'), ('scraper', '0002_auto_20150706_2105'), ('scraper', '0003_auto_20150706_2108'), ('scraper', '0004_auto_20150706_2110'), ('scraper', '0005_auto_20150706_2116')]
dependencies = [
]
operations = [
migrations.CreateModel(
name='WebsiteScraperConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('domain', models.CharField(max_length=400, db_index=True)),
('selector_style', models.CharField(blank=True, max_length=100, choices=[(b'css', b'css'), (b'xpath', b'xpath')])),
('name_selector', models.CharField(max_length=100, blank=True)),
('image_selector', models.CharField(max_length=100, blank=True)),
('content_selector', models.CharField(max_length=100)),
('next_page_selector', models.CharField(max_length=100, blank=True)),
('tabs_selector', models.CharField(max_length=100, blank=True)),
],
options={
'abstract': False,
},
),
]
| Python | 0.000001 | |
927c0d62ab289fc866ce80a3c2d6ff630a13d660 | add compatibility layer with older seabreeze | src/seabreeze/compat.py | src/seabreeze/compat.py | """seabreeze.compat compatibility layer with 0.6.x seabreeze
seabreeze 0.6.x
"""
import warnings
def _deprecation_warning(old_method, new_feature_method, version):
msg = "{old_method} will be deprecated in version {ver}, use {feature} feature via f.{feature}.{method} instead"
feature, method = new_feature_method.split('.')
warnings.warn(
msg.format(old_method=old_method, feature=feature, method=method, ver=version),
PendingDeprecationWarning,
stacklevel=2
)
class DeprecatedSpectrometerMixin(object):
@property
def f(self):
"""implemented in class using mixin"""
return None
def close(self):
raise NotImplementedError("implemented in class using mixin")
@property
def integration_time_micros_limits(self):
"""implemented in class using mixin"""
return None, None
# Compatibility Layer Methods
# ===========================
#
# will all go away in version 2
#
def boxcar_width(self, boxcar_width):
_deprecation_warning('boxcar_width', 'spectrum_processing.set_boxcar_width', version='2')
self.f.spectrum_processing.set_boxcar_width(boxcar_width)
def scans_to_average(self, scans_to_average):
_deprecation_warning('scans_to_average', 'spectrum_processing.set_scans_to_average', version='2')
self.f.spectrum_processing.set_scans_to_average(self, scans_to_average)
def get_boxcar_width(self):
_deprecation_warning('get_boxcar_width', 'spectrum_processing.get_boxcar_width', version='2')
return self.f.spectrum_processing.get_boxcar_width()
def get_scans_to_average(self):
_deprecation_warning('get_scans_to_average', 'spectrum_processing.get_scans_to_average', version='2')
return self.f.spectrum_processing.get_scans_to_average()
@property
def minimum_integration_time_micros(self):
return self.integration_time_micros_limits[0]
@property
def light_sources(self):
_deprecation_warning('light_sources', 'light_source.xxx', version='2')
N = self.f.light_source.get_count()
return tuple(DeprecatedLightSource(self.f.light_source, i) for i in range(N))
def eeprom_read_slot(self, slot):
_deprecation_warning('eeprom_read_slot', 'eeprom.eeprom_read_slot', version='2')
return self.f.eeprom.eeprom_read_slot(slot)
def tec_set_enable(self, enable):
_deprecation_warning('tec_set_enable', 'thermo_electric.enable_tec', version='2')
self.f.thermo_electric.enable_tec(enable)
# noinspection PyPep8Naming
def tec_set_temperature_C(self, set_point_C):
_deprecation_warning('tec_set_temperature_C',
'thermo_electric.set_temperature_setpoint_degrees_celsius', version='2')
self.f.thermo_electric.set_temperature_setpoint_degrees_celsius(set_point_C)
# noinspection PyPep8Naming
def tec_get_temperature_C(self):
_deprecation_warning('tec_get_temperature_C',
'thermo_electric.read_temperature_degrees_celsius', version='2')
return self.f.thermo_electric.read_temperature_degrees_celsius()
def lamp_set_enable(self, enable):
_deprecation_warning('lamp_set_enable',
'strobe_lamp.enable_lamp', version='2')
self.f.strobe_lamp.enable_lamp(enable)
def shutter_set_open(self, state):
_deprecation_warning('shutter_set_open',
'shutter.set_shutter_open', version='2')
self.f.shutter.set_shutter_open(state)
def stray_light_coeffs(self):
_deprecation_warning('stray_light_coeffs',
'stray_light_coefficients.get_stray_light_coefficients', version='2')
return self.f.stray_light_coefficients.get_stray_light_coefficients()
def irrad_calibration(self):
_deprecation_warning('irrad_calibration',
'irrad_cal.read_calibration', version='2')
return self.f.irrad_cal.read_calibration()
def irrad_calibration_collection_area(self):
_deprecation_warning('irrad_calibration_collection_area',
'irrad_cal.read_collection_area', version='2')
return self.f.irrad_cal.read_collection_area()
def continuous_strobe_set_enable(self, enable):
_deprecation_warning('continuous_strobe_set_enable',
'continuous_strobe.set_enable', version='2')
self.f.continuous_strobe.set_enable(enable)
def continuous_strobe_set_period_micros(self, period_micros):
_deprecation_warning('continuous_strobe_set_period_micros',
'continuous_strobe.set_period_micros', version='2')
self.f.continuous_strobe.set_period_micros(period_micros)
def __enter__(self):
warnings.warn(
"context manager functionality will be a noop in version 2",
PendingDeprecationWarning,
)
return self
# noinspection PyShadowingBuiltins
def __exit__(self, type, value, traceback):
warnings.warn(
"context manager functionality will be a noop in version 2",
PendingDeprecationWarning,
)
self.close()
class DeprecatedLightSource(object):
def __init__(self, spectrometer, index):
self._id = "{}:{}".format(spectrometer.model, spectrometer.serial_number)
self._f, self._idx = spectrometer.f.light_source, index
self._has_enable = self._f.has_enable(self._idx)
self._has_varint = self._f.has_variable_intensity(self._idx)
def set_enable(self, enable):
if self._has_enable:
self._f.set_enable(self._idx, enable)
else:
from seabreeze.spectrometers import SeaBreezeError
raise SeaBreezeError("Light source #%d can't be enabled or disabled.")
def set_intensity(self, intensity):
if self._has_varint:
self._f.set_intensity(self._idx, intensity)
else:
from seabreeze.spectrometers import SeaBreezeError
raise SeaBreezeError("Light source #%d intensity can't be set.")
def get_intensity(self):
from seabreeze.spectrometers import SeaBreezeError
try:
return self._f.get_intensity(self._idx)
except SeaBreezeError:
raise
def __repr__(self):
return "<DeprecatedLightSource #%d at %s>" % (self._idx, self._id)
| Python | 0 | |
7fb2b02c7c08912f54ef3cc0f22c53daa34ec639 | Add accelerometer and crash analysis | analysis/plot_accelerometer.py | analysis/plot_accelerometer.py | """Plots the accelerometer readings for x, y, and z."""
from dateutil import parser as dateparser
from matplotlib import pyplot
import json
import sys
def main():
if sys.version_info.major <= 2:
print('Please use Python 3')
sys.exit(1)
if len(sys.argv) != 2:
print('Usage: plot_accelerometer.py <log file>')
sys.exit(1)
with open(sys.argv[1]) as file_:
lines = file_.readlines()
first_stamp = timestamp(lines[0])
acceleration_g_x = []
acceleration_g_y = []
acceleration_g_z = []
acceleration_times = []
not_moving_times = []
run_times = []
stop_times = []
for line in lines:
if 'acceleration_g_x' in line:
data = json.loads(line[line.find('{'):])
acceleration_g_x.append(data['acceleration_g_x'])
acceleration_g_y.append(data['acceleration_g_y'])
acceleration_g_z.append(data['acceleration_g_z'])
acceleration_times.append(timestamp(line) - first_stamp)
elif 'not moving according' in line:
not_moving_times.append(timestamp(line) - first_stamp)
elif 'Received run command' in line:
run_times.append(timestamp(line) - first_stamp)
elif 'Received stop command' in line or 'No waypoints, stopping' in line:
stop_times.append(timestamp(line) - first_stamp)
pyplot.plot(acceleration_times, acceleration_g_x)
pyplot.scatter(not_moving_times, [0.25] * len(not_moving_times), color='blue')
pyplot.scatter(run_times, [0.3] * len(run_times), color='green')
pyplot.scatter(stop_times, [0.35] * len(stop_times), color='red')
pyplot.draw()
pyplot.show()
pyplot.plot(acceleration_times, acceleration_g_y)
pyplot.scatter(not_moving_times, [-0.25] * len(not_moving_times), color='blue')
pyplot.scatter(run_times, [-0.3] * len(run_times), color='green')
pyplot.scatter(stop_times, [-0.35] * len(stop_times), color='red')
pyplot.draw()
pyplot.show()
pyplot.plot(acceleration_times, acceleration_g_z)
pyplot.scatter(not_moving_times, [-0.75] * len(not_moving_times), color='blue')
pyplot.scatter(run_times, [-0.7] * len(run_times), color='green')
pyplot.scatter(stop_times, [-0.65] * len(stop_times), color='red')
pyplot.draw()
pyplot.show()
pyplot.plot(acceleration_times, acceleration_g_x)
pyplot.plot(acceleration_times, [i + 0.05 for i in acceleration_g_y])
pyplot.plot(acceleration_times, [i - 0.93 for i in acceleration_g_z])
pyplot.scatter(not_moving_times, [0.25] * len(not_moving_times), color='blue')
pyplot.scatter(run_times, [0.3] * len(run_times), color='green')
pyplot.scatter(stop_times, [0.35] * len(stop_times), color='red')
pyplot.draw()
pyplot.show()
def timestamp(line):
"""Returns the timestamp of a log line."""
dt = dateparser.parse(line[:line.find(',')])
comma = line.find(',')
millis = float(line[comma + 1:line.find(':', comma)])
return dt.timestamp() + millis / 1000.
if __name__ == '__main__':
main()
| Python | 0 | |
1b0c33c01b179831edc29b0b13a3f60e96b54321 | Create joyent.py | joyent.py | joyent.py | #!/usr/bin/env python
import os
import sys
import cPickle as pickle
from datetime import datetime
from smartdc import DataCenter
try:
import json
except ImportError:
import simplejson as json
debug = False
CACHE_EXPIRATION_IN_SECONDS = 300
SERVER_FILENAME = "joyent_server_cache.txt"
##
PATH_TO_FILE = os.getenv('HELPER')
joyent_key_id = "/" + os.environ['JOYENT_USERNAME'] + "/keys/" + os.environ['JOYENT_KEYNAME']
joyent_secret = os.environ['HOME'] + "/.ssh/id_rsa"
joyent_api = ['JOYENT_API_URL']
joyent_location = "eu-ams-1.api.joyentcloud.com"
if PATH_TO_FILE and os.path.isdir(PATH_TO_FILE) :
SERVER_FILENAME = PATH_TO_FILE + "/" + SERVER_FILENAME
if debug:
print SERVER_FILENAME
def getInventory():
servers = getServers()
inventory = {}
for server in servers:
group = server.type
if group is None:
group = 'ungrouped'
if not group in inventory:
inventory[group] = []
inventory[group].append(server.name)
return inventory
def getHost(hostname):
servers = getServers()
allhosts = {}
for server in servers:
## How to connect
if server.public_ips:
ssh_connection = server.public_ips[0]
elif server.private_ips:
ssh_connection = server.private_ips[0]
else:
ssh_connection = server.name
allhosts[server.name] = {
"joyent_id": server.id,
"joyent_public_ip": server.public_ips,
"joyent_private_ip": server.private_ips,
"ansible_ssh_host": ssh_connection
}
##SmartOS python
if server.type == "smartmachine":
allhosts[server.name]["ansible_python_interpreter"] = "/opt/local/bin/python"
allhosts[server.name]["ansible_ssh_user"] = "root"
return allhosts.get(hostname)
def getServers():
if not os.path.isfile(SERVER_FILENAME):
return retrieveServerList()
stats = os.stat(SERVER_FILENAME)
modification_time = stats.st_mtime
seconds_since_last_modified = (datetime.now() - datetime.fromtimestamp(modification_time)).total_seconds()
if debug:
print seconds_since_last_modified
if seconds_since_last_modified < CACHE_EXPIRATION_IN_SECONDS:
if debug:
print "retireving servers from cache..."
return fetchServersFromCache()
else:
return retrieveServerList()
def retrieveServerList():
""" Check cache period either read from cache or call api
"""
if debug:
print "retireving servers from the API..."
sdc = DataCenter(location=joyent_location, key_id=joyent_key_id, secret=joyent_secret, verbose=debug)
servers = sdc.machines()
storeServersToCache(servers)
return servers
class MyServer(object):
def __init__(self, name, type, public_ips, private_ips, id):
self.name = name
self.type = type
self.id = id
self.private_ips = private_ips
self.public_ips = public_ips
def fetchServersFromCache():
return pickle.load(open(SERVER_FILENAME, "rb"))
def storeServersToCache(servers):
myservers = [MyServer(server.name, server.type, server.public_ips, server.private_ips, server.id) for server in servers]
pickle.dump(myservers, open(SERVER_FILENAME, "wb"))
if __name__ == '__main__':
if debug:
print "using id_rsa" + joyent_secret + " with '" + joyent_key_id + "'"
if len(sys.argv) == 2 and (sys.argv[1] == '--list'):
print json.dumps(getInventory(), indent=4)
elif len(sys.argv) == 3 and (sys.argv[1] == '--host'):
print json.dumps(getHost(sys.argv[2]), indent=4)
else:
print "Usage: %s --list or --host <hostname>" % sys.argv[0]
sys.exit(1)
| Python | 0.000005 | |
872dd45173e889db06e9b16105492c241f7badae | Add an example for dynamic RPC lookup. | examples/rpc_dynamic.py | examples/rpc_dynamic.py | import asyncio
import aiozmq
import aiozmq.rpc
class DynamicHandler(aiozmq.rpc.AttrHandler):
def __init__(self, namespace=()):
self.namespace = namespace
def __getitem__(self, key):
try:
return getattr(self, key)
except AttributeError:
return DynamicHandler(self.namespace + (key,))
@aiozmq.rpc.method
def func(self):
return (self.namespace, 'val')
@asyncio.coroutine
def go():
server = yield from aiozmq.rpc.start_server(
DynamicHandler(), bind='tcp://*:*')
server_addr = next(iter(server.transport.bindings()))
client = yield from aiozmq.rpc.open_client(
connect=server_addr)
ret = yield from client.rpc.func()
assert ((), 'val') == ret, ret
ret = yield from client.rpc.a.func()
assert (('a',), 'val') == ret, ret
ret = yield from client.rpc.a.b.func()
assert (('a', 'b'), 'val') == ret, ret
server.close()
client.close()
def main():
asyncio.set_event_loop_policy(aiozmq.ZmqEventLoopPolicy())
asyncio.get_event_loop().run_until_complete(go())
print("DONE")
if __name__ == '__main__':
main()
| Python | 0 | |
e0b1bea00c56657ef9fb4456203a522920375cc2 | add testLCMSpy.py script | software/ddapp/src/python/tests/testLCMSpy.py | software/ddapp/src/python/tests/testLCMSpy.py | from ddapp.consoleapp import ConsoleApp
from ddapp import lcmspy
from ddapp import lcmUtils
from ddapp import simpletimer as st
app = ConsoleApp()
app.setupGlobals(globals())
if app.getTestingInteractiveEnabled():
app.showPythonConsole()
lcmspy.findLCMModulesInSysPath()
timer = st.SimpleTimer()
stats = {}
channelToMsg = {}
items = {}
def item(r, c):
rowDict = items.setdefault(r, {})
try:
return rowDict[c]
except KeyError:
i = QtGui.QTableWidgetItem('')
table.setItem(r, c, i)
rowDict[c] = i
return i
def printStats():
print '\n------------------------\n'
averages = [(channel, stat.getAverage()) for channel, stat in stats.iteritems()]
averages.sort(key=lambda x: x[1])
table.setRowCount(len(averages))
i = 0
for channel, bytesPerSecond in reversed(averages):
print channel, '%.3f kbps' % (bytesPerSecond/1024.0)
item(i, 0).setText(channel)
item(i, 1).setText(channelToMsg[channel])
item(i, 2).setText('%.3f kbps' % (bytesPerSecond/1024.0))
i += 1
def onMessage(messageData, channel):
messageData = str(messageData)
msgType = lcmspy.getMessageClass(messageData)
if not msgType:
#print 'failed decode:', channel
pass
else:
name = lcmspy.getMessageTypeFullName(msgType)
stat = stats.get(channel)
if not stat:
stat = st.AverageComputer()
stats[channel] = stat
stat.update(len(messageData))
if channel not in channelToMsg:
channelToMsg[channel] = lcmspy.getMessageTypeFullName(msgType) if msgType else '<unknown msg type>'
if timer.elapsed() > 3:
printStats()
timer.reset()
for stat in stats.values():
stat.reset()
#msg = lcmspy.decodeMessage(messageData)
sub = lcmUtils.addSubscriber(channel='.+', callback=onMessage)
sub.setNotifyAllMessagesEnabled(True)
from PythonQt import QtGui, QtCore
table = QtGui.QTableWidget()
table.setColumnCount(3)
table.setHorizontalHeaderLabels(['channel', 'type', 'bandwidth'])
table.verticalHeader().setVisible(False)
table.show()
app.start()
| Python | 0.000001 | |
cb2cc713c29c20ba239a60b6151c5e5c001c8e0b | Add joinkb.py | joinkb.py | joinkb.py | from __future__ import print_function
__module_name__ = 'Join Kickban'
__module_version__ = '0.1'
__module_description__ = 'Kickbans clients from specified channels on regex match against their nickname on join'
__author__ = 'Daniel A. J.'
import hexchat
import re
re = re.compile(r'\bfoo\b') # regex pattern to be matched against in user's nickname
check_channels = ['#test', '#fooness'] # channel(s) where script is active
net = 'freenode' # network where script is active
def join_search(word, word_eol, userdata):
channel = word[2]
user_nickname = ''.join(word[0][1:word[0].index('!')])
user_host = ''.join(word[0][word[0].index('@'):])
for x in check_channels:
if re.search(user_nickname) != None and channel == x and hexchat.get_info("network") == net:
hexchat.command("mode %s +b *!*%s" % (channel, user_host))
hexchat.command("kick %s regex pattern detected" % user_nickname)
return hexchat.EAT_ALL
def unload_joinkb(userdata):
print(__module_name__, 'version', __module_version__, 'unloaded.')
hexchat.hook_server("JOIN", join_search)
hexchat.hook_unload(unload_joinkb)
print(__module_name__, 'version', __module_version__, 'loaded.')
| Python | 0.000005 | |
f6864179a2dc1c531afc2c3ba6be300006e01fab | Create consecZero.py | Codingame/Python/Clash/consecZero.py | Codingame/Python/Clash/consecZero.py | import sys
import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
n = input()
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
c = 0
t = 0
for x in n:
if x == '0':
t += 1
else:
if t > c:
c = t
t = 0
if t > c:
c = t
print(c)
| Python | 0.000001 | |
6a4fb74befd22c2bc814dbe51a1fa884a077be9d | Create django_audit_snippets.py | example_code/django_audit_snippets.py | example_code/django_audit_snippets.py | from django.conf import settings
from urls import urlpatterns
'''
Access shell via
./manage.py shell
(or shell_plus if you have django-extensions)
Dont forget you may need to set environment variables:
- DJANGO_SETTINGS_MODULE to the settings file (python module load syntax like settings.filename) and
- PYTHONPATH to include the path where the Django code sits
Install ipython and django-extensions to get a better shell (shell_plus)
pip install django-extensions
This also has show_urls command which will do something similar to get_urls_friendly below
urls will not contain urlpatterns in later django releases
'''
# all the configured apps settings are now in here
settings
# this prints out mapped urls and associated views
def get_urls_friendly(raw_urls, nice_urls=[], urlbase=''):
'''Recursively builds a list of all the urls in the current project and the name of their associated view'''
for entry in raw_urls:
fullurl = (urlbase + entry.regex.pattern).replace('^','')
if entry.callback:
viewname = entry.callback.func_name
nice_urls.append('%s - %s' %(fullurl, viewname))
else:
get_urls_friendly(entry.url_patterns, nice_urls, fullurl)
nice_urls = sorted(list(set(nice_urls)))
return nice_urls
| Python | 0.000004 | |
73819cea7150e15212a014f9c3a42a69d0351ab8 | Create cutrope.py | cutrope.py | cutrope.py | # Author: Vikram Raman
# Date: 08-15-2015
import time
# Given a rope with length n, how to cut the rope into m parts with length n[0], n[1], ..., n[m-1],
# in order to get the maximal product of n[0]*n[1]* ... *n[m-1]?
# We have to cut once at least. Additionally, the length of the whole length of the rope,
# as well as the length of each part, are in integer value.
# For example, if the length of the rope is 8,
# the maximal product of the part lengths is 18.
# In order to get the maximal product,
# the rope is cut into three parts with lengths 2, 3, and 3 respectively.
# immediate thoughts: this is a dynamic programming knapsack kind of problem
def cutrope(l):
d = [0, 1]
for i in range(2, l+1):
maxVal = 0
for j in range(1, i):
maxVal = max(j * d[i-j], j * (i-j), maxVal)
d.append(maxVal)
print d
l = 8
start_time = time.clock()
cutrope(l)
print("--- %s seconds ---" % (time.clock() - start_time))
| Python | 0 | |
6b95af9822b9d94793eef503609b48d83066f594 | add test that causes KeyError for disabled text | test/test-text-diabled.py | test/test-text-diabled.py | from framework import *
root.title("Disabled text")
canv.create_text(200, 200,
text = "Test disabled text",
font = ("Times", 20),
state = DISABLED
)
thread.start_new_thread(test, (canv, __file__, True))
root.mainloop()
| Python | 0 | |
06efe8a8be913fb63f27016268d86f1ad0a5bcdf | Add test_engine_seed.py | tests/test_engine_seed.py | tests/test_engine_seed.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cgpm.crosscat.engine import Engine
from cgpm.utils import general as gu
def test_engine_simulate_no_repeat():
    """Generate 3 samples from 2 states 10 times, and ensure uniqueness."""
    rng = gu.gen_rng(1)
    engine = Engine(X=[[1]], cctypes=['normal'], num_states=2, rng=rng)
    draws = []
    for trial in xrange(10):
        samples = engine.simulate(rowid=-trial, query=[0], N=3)[0]
        draws.append([sample[0] for sample in samples])
    # Collapsing each draw to a frozenset lets us compare draws as sets;
    # every one of the 10 draws must be distinct from the others.
    distinct = {frozenset(draw) for draw in draws}
    assert len(distinct) == len(draws)
| Python | 0.000039 | |
3de2b08133f6f721a3a30120a93b81be0eacefb6 | add tests for the scuba.filecleanup sub-module | tests/test_filecleanup.py | tests/test_filecleanup.py | from __future__ import print_function
from nose.tools import *
from unittest import TestCase
try:
from unittest import mock
except ImportError:
import mock
from scuba.filecleanup import FileCleanup
def assert_set_equal(a, b):
    """Assert that two iterables hold the same elements, ignoring order."""
    left = set(a)
    right = set(b)
    assert_equal(left, right)
class TestFilecleanup(TestCase):
    """Unit tests for scuba.filecleanup.FileCleanup (os.remove is mocked)."""

    @mock.patch('os.remove')
    def test_files_tracked(self, os_remove_mock):
        '''FileCleanup.files works'''
        fc = FileCleanup()
        fc.register('foo.txt')
        fc.register('bar.bin')
        assert_set_equal(fc.files, ['foo.txt', 'bar.bin'])

    @mock.patch('os.remove')
    def test_basic_usage(self, os_remove_mock):
        '''FileCleanup removes one file'''
        fc = FileCleanup()
        fc.register('foo.txt')
        fc.cleanup()
        os_remove_mock.assert_any_call('foo.txt')

    @mock.patch('os.remove')
    def test_multiple_files(self, os_remove_mock):
        '''FileCleanup removes multiple files'''
        fc = FileCleanup()
        fc.register('foo.txt')
        fc.register('bar.bin')
        fc.register('/something/snap.crackle')
        fc.cleanup()
        os_remove_mock.assert_any_call('bar.bin')
        os_remove_mock.assert_any_call('foo.txt')
        os_remove_mock.assert_any_call('/something/snap.crackle')

    # BUG FIX: this test was also named test_multiple_files, which shadowed
    # the test above so it was never collected/run. Renamed so both execute.
    @mock.patch('os.remove')
    def test_cleanup_ignores_oserror(self, os_remove_mock):
        '''FileCleanup ignores os.remove() errors'''
        def os_remove_se(path):
            if path == 'INVALID':
                raise OSError('path not found')
        os_remove_mock.side_effect = os_remove_se

        fc = FileCleanup()
        fc.register('foo.txt')
        fc.register('bar.bin')
        fc.register('INVALID')
        fc.cleanup()
        os_remove_mock.assert_any_call('bar.bin')
        os_remove_mock.assert_any_call('foo.txt')
        # the tracked list is emptied even when a removal failed
        assert_set_equal(fc.files, [])
| Python | 0 | |
d41005d14239a93237fb839084f029208b94539d | Use the custom.js as served from the CDN for try | common/profile_default/ipython_notebook_config.py | common/profile_default/ipython_notebook_config.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
# NOTE: get_config() is injected into this file's namespace by IPython
# when the profile is loaded; it is not an import.
c = get_config()
# Headless server profile: listen on every interface, fixed port, and
# never try to open a local browser.
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headerssent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Include our extra templates
c.NotebookApp.extra_template_paths = ['/srv/templates/']
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
    'headers': {
        # Only allow the notebook to be framed by jupyter.org / tmpnb.org.
        'Content-Security-Policy': "frame-ancestors 'self' https://*.jupyter.org https://jupyter.github.io https://*.tmpnb.org"
    },
    # Serve static assets (including custom.js) from the "try" CDN prefix.
    'static_url_prefix': 'https://cdn.jupyter.org/notebook/try/'
}
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headerssent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Include our extra templates
c.NotebookApp.extra_template_paths = ['/srv/templates/']
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
'headers': {
'Content-Security-Policy': "frame-ancestors 'self' https://*.jupyter.org https://jupyter.github.io https://*.tmpnb.org"
},
'static_url_prefix': 'https://cdn.jupyter.org/notebook/3.1.0/'
}
| Python | 0 |
2b380d501b80afad8c7c5ec27537bcc682ed2775 | Fix some scope mistakes. This fix was part of the reverted commit. | commands/handle.py | commands/handle.py | import commands.cmds as cmds
def handle(self, chat_raw):
    """Parse a raw chat command line and dispatch it to its handler.

    ``chat_raw`` is the full chat line (e.g. "/say hello"): the first
    token minus its leading sigil character selects the command; the
    remaining tokens are passed through unparsed in ``args_raw``.
    """
    self.logger.info("Handling command: " + chat_raw + " (for player" + self.fquid + ")")
    tokens = chat_raw.split(" ")
    # BUG FIX: the base used to be a *list* of characters, which is
    # unhashable — `base not in cmds.baseList` raised TypeError before any
    # dispatch could happen. Keep it as a plain string instead.
    base = tokens[0][1:]
    args = tokens[1:]
    cmdobj = {
        "base": base,
        "args_raw": args,
        "scope": self,
        "chat_raw": chat_raw
    }
    # Dispatch to the named command, or to InvalidCommand for unknown names.
    if base in cmds.baseList:
        cmds.baseList[base].begin(self, cmdobj)
    else:
        cmds.InvalidCommand.begin(self, cmdobj)
def handle(self, chat_raw):
self.logger.info("Handling command: " + chat_raw + " (for player" + self.fquid + ")")
_atmp1 = chat_raw.split(" ")
_atmp2 = list(_atmp1[0])
del _atmp2[0]
del _atmp1[0]
cmdobj = {
"base": _atmp2,
"args_raw": _atmp1,
"scope": self,
"chat_raw": chat_raw
}
commands.cmds.InvalidCommand.begin(self, cmdobj) if _atmp2 not in commands.cmds.baseList else commands.cmds.baseList[_atmp2].begin(self, cmdobj) | Python | 0 |
b37f31b5adbdda3e5d40d2d8a9dde19b2e305c2c | Add tests for the controller module | ckanext/wirecloudview/tests/test_controller.py | ckanext/wirecloudview/tests/test_controller.py | # -*- coding: utf-8 -*-
# Copyright (c) 2018 Future Internet Consulting and Development Solutions S.L.
# This file is part of CKAN WireCloud View Extension.
# CKAN WireCloud View Extension is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# CKAN WireCloud View Extension is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with CKAN WireCloud View Extension. If not, see <http://www.gnu.org/licenses/>.
# This file is part of CKAN Data Requests Extension.
import json
import unittest
from mock import DEFAULT, patch
from ckanext.wirecloudview.controller import WireCloudViewController
class WirecloudViewControllerTest(unittest.TestCase):
    """Unit tests for WireCloudViewController with every CKAN/requests
    collaborator in the controller module patched out."""

    @patch.multiple("ckanext.wirecloudview.controller", request=DEFAULT, get_plugin=DEFAULT, toolkit=DEFAULT, OAuth2Session=DEFAULT, response=DEFAULT)
    def test_get_workspaces(self, request, get_plugin, toolkit, OAuth2Session, response):
        """get_workspaces() proxies the WireCloud search API and reshapes
        the results into a {"ResultSet": {"Result": [...]}} JSON payload."""
        self.controller = WireCloudViewController()
        self.controller.client_id = "aclientid"
        # Simulated query string: free-text 'incomplete' plus a result limit.
        request.params = {
            'incomplete': 'key words',
            'limit': '20',
        }
        get_plugin().wirecloud_url = "https://dashboards.example.org"
        # Grab the session instance first, then clear call history so the
        # assert_called_once_with checks below only see the controller's calls.
        oauth = OAuth2Session()
        OAuth2Session.reset_mock()
        oauth.get().json.return_value = {
            "results": [
                {"owner": "user1", "name": "dashboard1"},
                {"owner": "user2", "name": "other-dashboard"},
            ]
        }
        oauth.get.reset_mock()
        response.headers = {}
        result = self.controller.get_workspaces()
        # Each workspace entry is flattened to "owner/name".
        self.assertEqual(
            json.loads(result.decode('utf-8')),
            {
                "ResultSet": {
                    "Result": [
                        {"Name": "user1/dashboard1"},
                        {"Name": "user2/other-dashboard"},
                    ]
                }
            }
        )
        self.assertEqual(response.headers[b'Content-Type'], b"application/json")
        OAuth2Session.assert_called_once_with(self.controller.client_id, token=toolkit.c.usertoken)
        oauth.get.assert_called_once_with("https://dashboards.example.org/api/search?namespace=workspace&q=key+words&maxresults=20")
| Python | 0 | |
d1d1892551d805b5a73aaef07932c65fd375e342 | Add Rules unit test | py/desisurvey/test/test_rules.py | py/desisurvey/test/test_rules.py | import unittest
import numpy as np
import desisurvey.tiles
from desisurvey.rules import Rules
class TestRules(unittest.TestCase):
    """Smoke-test Rules.apply against fully, empty and partially
    completed tile masks."""

    def setUp(self):
        pass

    def test_rules(self):
        rules = Rules()
        tiles = desisurvey.tiles.get_tiles()
        ntiles = tiles.ntiles
        # All tiles completed, then none completed.
        done = np.ones(ntiles, bool)
        rules.apply(done)
        done[:] = False
        rules.apply(done)
        # Progressively mark random 10% batches as completed.
        chooser = np.random.RandomState(123)
        for _ in range(10):
            batch = chooser.choice(ntiles, ntiles // 10, replace=False)
            done[batch] = True
            rules.apply(done)
def test_suite():
    """Return a suite for just this module, enabling::

        python setup.py test -m <modulename>
    """
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromName(__name__)
| Python | 0 | |
6350092030d267621d2430d4505c01455d1de2d3 | Create Misha_rungaKutta.py | math/runge-kutta_method/Misha_rungaKutta.py | math/runge-kutta_method/Misha_rungaKutta.py | import matplotlib.pyplot as plt
# Python program to implement Runge Kutta method
def dydx(x, y):
    """Right-hand side of the ODE: dy/dx = (18x + 1.33y) / (1.33x + 18y)."""
    numerator = 18 * x + 1.33 * y
    denominator = 1.33 * x + 18 * y
    return numerator / denominator
# Finds value of y for a given x using step size h
# and initial value y0 at x0.
def rungeKutta(x0, y0, x, h, Q1=0.5, Q2=0.5, w1=0.5, w2=0.5, c1=1, c2=2, c3=2, c4=1):
    """Integrate dydx from x0 to x with step size h using a parameterised
    fourth-order Runge-Kutta scheme.

    The Q*/w*/c* coefficients select the tableau (defaults reproduce the
    classic RK4 weights 1/6 * (k1 + 2k2 + 2k3 + k4)). Returns the list of
    y values recorded after every step.
    """
    steps = int((x - x0) / h)
    history = []
    y = y0
    for _ in range(steps):
        k1 = h * dydx(x0, y)
        k2 = h * dydx(x0 + Q1 * h, y + w1 * k1)
        k3 = h * dydx(x0 + Q2 * h, y + w2 * k2)
        k4 = h * dydx(x0 + h, y + k3)
        # Weighted combination of the four slopes, normalised by the
        # coefficient sum so custom weights stay an average.
        y = y + (1.0 / (c1 + c2 + c3 + c4)) * \
            (c1 * k1 + c2 * k2 + c3 * k3 + c4 * k4)
        history.append(y)
        x0 = x0 + h
    return history
def rootMeanSquareError(standard, predicted):
    """Return the root-mean-square error between two number sequences.

    Pairs are taken positionally (zip truncates to the shorter input,
    as before); the mean divides by len(standard).
    """
    # Idiom fix: the original accumulated into a local named `sum`,
    # shadowing the builtin — use the builtin over squared errors instead.
    total = sum((p - a) ** 2 for a, p in zip(standard, predicted))
    mean = total / len(standard)
    return mean ** 0.5
# Driver method
# Parameters shared as module globals by compare() and calc() below.
x0 = 0           # integration start
y = 18 / 1.33    # initial condition y(x0)
x = 18           # integration end
h = 0.5          # base interval step size
# reducing errors by using modifying values of w,q,ci
def compare():
    """Compare a modified-coefficient RK run against the default RK4 run.

    NOTE(review): the loop body does not depend on ``i``, so all ten
    iterations compute the identical error value — presumably the
    coefficients were meant to vary per iteration; confirm.
    """
    errList = []
    y_standard = rungeKutta(x0, y, x, h)
    for i in range(1, 11):
        y_modified = rungeKutta(x0, y, x, h, 0.8, 0.5, 0.8, 0.4, 2, 3, 4, 0)
        err = rootMeanSquareError(y_modified, y_standard)
        errList.append(err)
    print(errList)
    # 1-based index of the smallest error for human-friendly reporting.
    print("minimum error", min(errList), errList.index(min(errList))+1)
    return errList
# Successively halved interval step sizes for the convergence study in calc().
n = [h,h/2,h/4,h/8,h/16]
def calc():
    """Run both RK variants for every step size in `n`; return the RMSE
    between them for each step size.

    Also prints the final y of each run and the accumulated error list.
    """
    List = []
    for i in (n):
        a = rungeKutta(x0, y, x, i)
        b = rungeKutta(x0, y, x, i, 0.8, 0.5, 0.8, 0.4, 2, 3, 4, 0)
        c = rootMeanSquareError(b, a)
        print("A ",a[-1]," b ",b[-1]," c ",c )
        List.append(c)
        print("error", c)
    print("error list", List)
    return List
# u = compare()
# print("compare", u)
# NOTE(review): this rebinds the module-global `y` (the initial condition)
# to the error list; harmless only because nothing reads `y` afterwards.
y = calc()
# plotting the points
plt.plot(n, y)
plt.xlabel('Interval step size (h)')
plt.ylabel('Root mean square error')
plt.title('Error Analysis Graph!')
plt.show()
| Python | 0 | |
3efa20e0d93c922bec6ae0f41774fd406532257a | Allow manually graded code cells | nbgrader/preprocessors/checkcellmetadata.py | nbgrader/preprocessors/checkcellmetadata.py | from nbgrader import utils
from nbgrader.preprocessors import NbGraderPreprocessor
class CheckCellMetadata(NbGraderPreprocessor):
    """A preprocessor for checking that grade ids are unique."""

    def preprocess(self, nb, resources):
        # Collect every grade id seen by preprocess_cell, then verify
        # afterwards that no id occurs twice across the whole notebook.
        resources['grade_ids'] = ids = []
        nb, resources = super(CheckCellMetadata, self).preprocess(nb, resources)
        id_set = set([])
        for grade_id in ids:
            if grade_id in id_set:
                raise RuntimeError("Duplicate grade id: {}".format(grade_id))
            id_set.add(grade_id)
        return nb, resources

    def preprocess_cell(self, cell, resources, cell_index):
        # Validate a single cell's nbgrader metadata; raises RuntimeError
        # on the first invalid field it finds.
        if utils.is_grade(cell):
            # check for blank grade ids
            grade_id = cell.metadata.nbgrader.get("grade_id", "")
            if grade_id == "":
                raise RuntimeError("Blank grade id!")
            resources['grade_ids'].append(grade_id)

            # check for valid points
            points = cell.metadata.nbgrader.get("points", "")
            try:
                points = float(points)
            except ValueError:
                raise RuntimeError(
                    "Point value for grade cell {} is invalid: {}".format(
                        grade_id, points))

        # Code cells may be both grade and solution cells (manually graded
        # code), so no grade/solution exclusivity is enforced for them.
        # check that markdown cells are grade AND solution (not either/or)
        if cell.cell_type == "markdown" and utils.is_grade(cell) and not utils.is_solution(cell):
            raise RuntimeError(
                "Markdown grade cell '{}' is not marked as a solution cell".format(
                    grade_id))
        if cell.cell_type == "markdown" and not utils.is_grade(cell) and utils.is_solution(cell):
            raise RuntimeError(
                "Markdown solution cell (index {}) is not marked as a grade cell".format(
                    cell_index))

        return cell, resources
| from nbgrader import utils
from nbgrader.preprocessors import NbGraderPreprocessor
class CheckCellMetadata(NbGraderPreprocessor):
"""A preprocessor for checking that grade ids are unique."""
def preprocess(self, nb, resources):
resources['grade_ids'] = ids = []
nb, resources = super(CheckCellMetadata, self).preprocess(nb, resources)
id_set = set([])
for grade_id in ids:
if grade_id in id_set:
raise RuntimeError("Duplicate grade id: {}".format(grade_id))
id_set.add(grade_id)
return nb, resources
def preprocess_cell(self, cell, resources, cell_index):
if utils.is_grade(cell):
# check for blank grade ids
grade_id = cell.metadata.nbgrader.get("grade_id", "")
if grade_id == "":
raise RuntimeError("Blank grade id!")
resources['grade_ids'].append(grade_id)
# check for valid points
points = cell.metadata.nbgrader.get("points", "")
try:
points = float(points)
except ValueError:
raise RuntimeError(
"Point value for grade cell {} is invalid: {}".format(
grade_id, points))
# check that code cells are grade OR solution (not both)
if cell.cell_type == "code" and utils.is_grade(cell) and utils.is_solution(cell):
raise RuntimeError(
"Code grade cell '{}' is also marked as a solution cell".format(
grade_id))
# check that markdown cells are grade AND solution (not either/or)
if cell.cell_type == "markdown" and utils.is_grade(cell) and not utils.is_solution(cell):
raise RuntimeError(
"Markdown grade cell '{}' is not marked as a solution cell".format(
grade_id))
if cell.cell_type == "markdown" and not utils.is_grade(cell) and utils.is_solution(cell):
raise RuntimeError(
"Markdown solution cell (index {}) is not marked as a grade cell".format(
cell_index))
return cell, resources
| Python | 0 |
c6b9ef93b8d20589d454e2c63bba60fe383975b5 | Add files via upload | erasure.py | erasure.py | #!/usr/bin/env python
import numpy as np
import random
import hashlib
'''
Reed Solomon Encoding
data - column vector array
sz - integer length of data
Encodes data and returns a code that can be decoded
'''
class ErasureCoding():
    """Work-in-progress Reed-Solomon style encode/decode helpers (Python 2).

    NOTE(review): `ndim`, `mdim`, `pad_len` and `dest_arr` are assigned
    from outside __init__ (by rs() and the driver) — confirm intended.
    """
    def __init__(self):
        pass

    def _encode(self, x_vector, xform):
        '''Do a Reed Solomon encoding of a vector of data

        Keyword Arguments:
        x_vector -- numpy vector of data
        xform -- numpy array to transform data
        returns transformed vector
        '''
        res = np.dot(xform, x_vector)
        return res

    def _decode(self, code, inv):
        '''Decode data that has been transformed by a Reed Solomon
        transformation

        Keyword Arguments:
        code -- encoded data in a numpy array
        inv -- inverse Reed Solomon transformation as a numpy matrix
        returns transformed vector
        '''
        return(np.dot(inv, code))

    def chunks(self, data, ch_sz):
        '''Convert an array of data into chunks

        Keyword arguments:
        data -- the data to be converted
        ch_sz -- chunk size
        returns a generator over the chunks
        '''
        for ii in xrange(0, len(data), ch_sz):
            yield data[ii:ii + ch_sz]

    def rs_read(self, _dd):
        # Reassemble the character string from ndim-sized chunks of byte
        # values, then strip the pad_len+1 '0' padding added by rs_write.
        _out = []
        _buf = []
        for ii in self.chunks(_dd, self.ndim):
            data = np.array(ii)
            _buf[:] = [ chr(x) for x in data]
            _out += _buf
        output = "".join(_out)
        output = output[:-self.pad_len -1 or None]
        return output

    def rs_write(self, _data):
        '''Pad _data to a multiple of ndim and return its byte values.

        Prints the md5 of the unpadded input so round trips can be
        checked by eye against rs_read's printed output.
        '''
        # NOTE(review): B / B_prime / B_prime_inv are computed but never
        # used below — presumably groundwork for the real RS transform.
        Id = np.identity(self.ndim)
        b = np.array([[0,0,1,0,0],[0,0,0,1,1],[0,0,0,0,1]])
        B = np.vstack((Id, b))
        bad_rows = [2,3,4]
        B_prime = np.delete(B, bad_rows , 0)
        B_prime_inv = np.linalg.inv(B_prime)
        m = hashlib.md5()
        m.update(_data)
        print m.hexdigest()
        _d_len = len(_data)
        self.pad_len = _d_len % self.ndim
        # Append pad_len + 1 filler characters (rs_read strips the same).
        for ii in xrange(0, self.pad_len + 1):
            _data += '0'
        _dd = []
        _dd[:] = [ ord(x) for x in _data ]
        #self.dest_arr.node
        return _dd

    def rs(self, _data):
        '''Round-trip _data through rs_write/rs_read and print the result.'''
        self.ndim = 5
        self.mdim = 3
        dd = self.rs_write(_data)
        print self.rs_read(dd)
def main():
    """Demo driver: round-trip a few sample strings through ErasureCoding."""
    ec = ErasureCoding()
    # Five independent (currently empty) destination nodes for shards.
    ec.dest_arr = [dict() for _ in range(5)]
    samples = (
        "holy smokes bat man! would you look at that!",
        "The ugly man rides a big motorcycle",
        "There has also been White House conflict with Cabinet members such as Treasury Secretary Steven Mnuchin, who has vented to friends that Priebus has blocked his choice for deputy secretary, Goldman Sachs managing director Jim Donovan, according to one person familiar with the talks. Secretary of State Rex Tillerson, meanwhile, has complained that the chief of staff is picking who will get plum ambassador posts without always consulting others, said another person familiar with that situation.",
    )
    for sample in samples:
        ec.rs(sample)

if __name__ == '__main__':
    main()
| Python | 0.000001 | |
22e8cc6200cafd5cec386c35142cd742d4a2a735 | add problem 34 | problem_034.py | problem_034.py | #!/usr/bin/env python
#-*-coding:utf-8-*-
'''
145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145.
Find the sum of all numbers which are equal to
the sum of the factorial of their digits.
Note: as 1! = 1 and 2! = 2 are not sums they are not included.
'''
import math
import timeit
def calc():
    """Return all numbers equal to the sum of the factorials of their digits.

    The search starts at 3 because 1! and 2! are not sums (see header),
    and the fixed upper bound 2177280 is where the docketed search ends.
    """
    # Performance: precompute the ten digit factorials once instead of
    # calling math.factorial for every digit of ~2.17 million numbers.
    digit_factorial = [math.factorial(d) for d in range(10)]
    eqs = []
    for i in range(3, 2177280):
        if i == sum(digit_factorial[int(ch)] for ch in str(i)):
            eqs.append(i)
    return eqs
# Script entry point (Python 2 print statements). Note that timeit
# re-imports this module by name, so calc() runs a second time.
if __name__ == '__main__':
    print calc()
    print timeit.Timer('problem_034.calc()', 'import problem_034').timeit(1)
| Python | 0.00477 | |
f64068b7b6e50f9280b51831715df8cb4c586daa | Update merge person tool | project/apps/api/management/commands/merge_persons.py | project/apps/api/management/commands/merge_persons.py | from optparse import make_option
from django.core.management.base import (
BaseCommand,
CommandError,
)
from apps.api.models import (
Person,
Singer,
Director,
Arranger,
)
class Command(BaseCommand):
    """Management command that merges one Person record into another.

    All Director/Singer/Arranger relations of the old person are re-created
    against the new person; the old person is then deleted.
    """
    help = "Merge selected singers by name"

    option_list = BaseCommand.option_list + (
        make_option(
            "-o",
            "--old",
            dest="old",
            help="specify old name",
        ),
    )
    option_list = option_list + (
        make_option(
            "-n",
            "--new",
            dest="new",
            help="specify new name",
        ),
    )

    def handle(self, *args, **options):
        # both name options are mandatory
        if options['old'] is None:
            raise CommandError("Option `--old=...` must be specified.")

        if options['new'] is None:
            raise CommandError("Option `--new=...` must be specified.")

        # make sure both persons exist (case-insensitive name match)
        try:
            new_person = Person.objects.get(
                name__iexact=options['new'],
            )
        except Person.DoesNotExist:
            raise CommandError("New person does not exist.")

        try:
            old_person = Person.objects.get(
                name__iexact=options['old'],
            )
        # BUG FIX: this lookup is on Person, so the matching exception is
        # Person.DoesNotExist (it previously caught Singer.DoesNotExist,
        # letting the real exception escape as an unhandled traceback).
        except Person.DoesNotExist:
            raise CommandError("Old person does not exist.")

        # Re-create each relation of the old person against the new one.
        for director in old_person.choruses.all():
            Director.objects.create(
                person=new_person,
                contestant=director.contestant,
                part=director.part,
            )

        for singer in old_person.quartets.all():
            Singer.objects.create(
                person=new_person,
                contestant=singer.contestant,
                part=singer.part,
            )

        for arranger in old_person.arrangements.all():
            Arranger.objects.create(
                person=new_person,
                chart=arranger.chart,
                part=arranger.part,
            )

        # remove the now-redundant person record
        try:
            old_person.delete()
        except Exception as e:
            raise CommandError("Error deleted old singer: {0}".format(e))

        return "Merged {0} into {1}".format(old_person, new_person)
| Python | 0.000001 | |
5940c705a390a73f5bef786ea9e3800a9d4cf7c9 | Create `serverless` module for handling Serverless Framework deploys (#3352) | lib/ansible/modules/extras/cloud/serverless.py | lib/ansible/modules/extras/cloud/serverless.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Ryan Scott Brown <ryansb@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: serverless
short_description: Manages a Serverless Framework project
description:
- Provides support for managing Serverless Framework (https://serverless.com/) project deployments and stacks.
version_added: "2.3"
options:
state:
choices: ['present', 'absent']
description:
- Goal state of given stage/project
required: false
default: present
service_path:
description:
- The path to the root of the Serverless Service to be operated on.
required: true
functions:
description:
- A list of specific functions to deploy. If this is not provided, all functions in the service will be deployed.
required: false
default: []
region:
description:
- AWS region to deploy the service to
required: false
default: us-east-1
deploy:
description:
- Whether or not to deploy artifacts after building them. When this option is `false` all the functions will be built, but no stack update will be run to send them out. This is mostly useful for generating artifacts to be stored/deployed elsewhere.
required: false
default: true
notes:
- Currently, the `serverless` command must be in the path of the node executing the task. In the future this may be a flag.
requirements: [ "serverless" ]
author: "Ryan Scott Brown @ryansb"
'''
EXAMPLES = """
# Basic deploy of a service
- serverless: service_path={{ project_dir }} state=present
# Deploy specific functions
- serverless:
service_path: "{{ project_dir }}"
functions:
- my_func_one
- my_func_two
# deploy a project, then pull its resource list back into Ansible
- serverless:
stage: dev
region: us-east-1
service_path: "{{ project_dir }}"
register: sls
# The cloudformation stack is always named the same as the full service, so the
# cloudformation_facts module can get a full list of the stack resources, as
# well as stack events and outputs
- cloudformation_facts:
region: us-east-1
stack_name: "{{ sls.service_name }}"
stack_resources: true
"""
RETURN = """
service_name:
type: string
description: Most
returned: always
sample: my-fancy-service-dev
state:
type: string
description: Whether the stack for the serverless project is present/absent.
returned: always
command:
type: string
description: Full `serverless` command run by this module, in case you want to re-run the command outside the module.
returned: always
sample: serverless deploy --stage production
"""
import os
import traceback
import yaml
def read_serverless_config(module):
    """Load and parse <service_path>/serverless.yml, failing the module
    cleanly when the file is missing or unreadable.

    Returns the parsed YAML document (normally a dict).
    """
    path = os.path.expanduser(module.params.get('service_path'))

    try:
        with open(os.path.join(path, 'serverless.yml')) as sls_config:
            config = yaml.safe_load(sls_config.read())
            return config
    except IOError as e:
        module.fail_json(msg="Could not open serverless.yml in {}. err: {}".format(path, str(e)), exception=traceback.format_exc())
    except yaml.YAMLError as e:
        # Robustness fix: malformed YAML previously escaped as a raw
        # traceback instead of a clean Ansible failure.
        module.fail_json(msg="Could not parse serverless.yml in {}. err: {}".format(path, str(e)), exception=traceback.format_exc())

    # Unreachable when fail_json exits (normal Ansible behaviour); kept as
    # a guard for environments that stub fail_json out.
    module.fail_json(msg="Failed to open serverless config at {}".format(
        os.path.join(path, 'serverless.yml')))
def get_service_name(module, stage):
    """Return the stack name "<service>-<stage>".

    Falls back to the stage configured in serverless.yml (default "dev")
    when no explicit stage is supplied.
    """
    config = read_serverless_config(module)
    service = config.get('service')
    if service is None:
        module.fail_json(msg="Could not read `service` key from serverless.yml file")
    effective_stage = stage if stage else config.get('stage', 'dev')
    return "{}-{}".format(service, effective_stage)
def main():
    """Ansible entry point: build and run the `serverless` CLI command."""
    module = AnsibleModule(
        argument_spec=dict(
            service_path = dict(required=True),
            state = dict(default='present', choices=['present', 'absent'], required=False),
            functions = dict(type='list', required=False),
            region = dict(default='', required=False),
            stage = dict(default='', required=False),
            deploy = dict(default=True, type='bool', required=False),
        ),
    )

    service_path = os.path.expanduser(module.params.get('service_path'))
    state = module.params.get('state')
    # NOTE(review): `functions` is read but never used when building the
    # command below — per-function deploys appear to be unimplemented.
    functions = module.params.get('functions')
    region = module.params.get('region')
    stage = module.params.get('stage')
    deploy = module.params.get('deploy', True)

    # `state` maps directly onto the serverless sub-command.
    command = "serverless "
    if state == 'present':
        command += 'deploy '
    elif state == 'absent':
        command += 'remove '
    else:
        module.fail_json(msg="State must either be 'present' or 'absent'. Received: {}".format(state))

    # --noDeploy builds artifacts without updating the CloudFormation stack.
    if not deploy and state == 'present':
        command += '--noDeploy '

    if region:
        command += '--region {} '.format(region)
    if stage:
        command += '--stage {} '.format(stage)

    rc, out, err = module.run_command(command, cwd=service_path)
    if rc != 0:
        # Removing a stage that was never deployed counts as "already
        # absent" rather than a failure.
        if state == 'absent' and "-{}' does not exist".format(stage) in out:
            module.exit_json(changed=False, state='absent', command=command,
                    out=out, service_name=get_service_name(module, stage))

        module.fail_json(msg="Failure when executing Serverless command. Exited {}.\nstdout: {}\nstderr: {}".format(rc, out, err))

    # gather some facts about the deployment
    module.exit_json(changed=True, state='present', out=out, command=command,
            service_name=get_service_name(module, stage))
# import module snippets
# (a star-import at the bottom of the file is the historical Ansible
# module convention; it supplies AnsibleModule used in main() above)
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
| Python | 0 | |
12bca37026ef4db41bd452dcb8cdc9022cdcf8c9 | Create pythonhelloworld.py | pythonhelloworld.py | pythonhelloworld.py | print "hello word"
| Python | 0.999993 | |
8e4240cd9bc2c06264ef23fddfc93ccf76e5ff9b | Create progressbar.py | progressbar.py | progressbar.py | ################################################################################
# Example usage:
# $ python
# >>> import Progress
# >>> total = 100
# >>> message = 'Doing this task '
# >>> with Progress.Bar(total, message) as bar:
# ... for n in range(total):
# ... time.sleep(0.1)
# ... bar.update()
# ...
# Doing this task [------------------------------------------------------------]
################################################################################
import sys
################################################################################
class Bar:
    """Console progress bar intended for use as a context manager (see the
    module-level example above).

    NOTE(review): `total / self.width` is true division on Python 3, so
    `granularity` becomes a float — presumably written for Python 2
    integer division; update()'s float comparison still behaves, but
    confirm rendering for totals smaller than the bar width.
    """
    # A progress bar is draw using 4 elements:
    # 1. A message
    # 2. The left (start) boundary
    # 3. The body of the progress bar
    # 4. The right (end) boundary
    template = '{msg}{start}{body}{end}'

    ##################################################
    def __init__(self, total, message='', max_width=80,
                 marker='#', placeholders='-',
                 start='[', end=']'):
        # Assume zero width so that self.from_template() works
        self.width = 0
        # A bar measures progress towards a total
        self.total = total
        # A progress bar may have a message before it
        self.message = message
        # A Progress.Bar is a series of markers
        self.marker = marker
        # drawn over the top of placeholders
        self.placeholders = placeholders
        # and delimited by start and end characters
        self.start=start
        self.end=end
        # calculate how much of the max_width will be consumed by the message
        # and the start/end delimiters.
        padding_width = len(self.from_template())
        # Calculate the width of the body of the bar
        self.width = max_width - padding_width
        # How many parts of the total go per marker in the body of the bar
        self.granularity = total / self.width

    ##############################
    def from_template(self):
        ''' Returns a string representation of the Progress.Bar, including the
            message, the start and end markers and a series of placeholders.
        '''
        return self.template.format(msg = self.message,
                                    start = self.start,
                                    end = self.end,
                                    body = self.placeholders * self.width)

    ##################################################
    def __enter__(self):
        # How much of the total has passed
        self.progress = 0
        # How much of the width has been drawn
        self.rendered = 0
        # Write out the Progress.Bar with placeholders
        sys.stdout.write(self.from_template())
        # Write out backspaces until the cursor is at the start marker
        sys.stdout.write('\b' * (self.width + len(self.end)))
        sys.stdout.flush()
        # act as a proper generator
        return self

    ##############################
    def __exit__(self, type, value, traceback):
        # always render a completed Progress.Bar
        # (this also runs when the with-body raised, so the bar closes cleanly)
        while not self.is_fully_rendered():
            self.render()
        # then finish on the next line
        print('')

    ##################################################
    def render(self):
        ''' Outputs one marker over the top of a placeholder if the progress
            bar is still not fully rendered.
        '''
        self.rendered += 1
        if not self.is_fully_rendered():
            sys.stdout.write(self.marker)
            sys.stdout.flush()

    ##############################
    def is_fully_rendered(self):
        # True once render() has been called width+1 times; the final call
        # prints nothing and just closes the bar.
        return self.rendered > self.width

    ##################################################
    def update(self, n=1):
        ''' Update the Progress.Bar n counts towards the total.
        '''
        # Recursive over n: advance one count, emit any markers now due,
        # then recurse for the remaining n-1 counts.
        if n > 0:
            self.progress += 1
            while self.progress / self.granularity > self.rendered:
                self.render()
            self.update(n-1)
| Python | 0.000001 | |
465c2c92da5db91bcc1f9149fbfa5722d30e10f9 | add some tests for the Basic Auth filter | test/test_basic_auth.py | test/test_basic_auth.py | import unittest
from libsaas import http
from libsaas.filters import auth
class BasicAuthTestCase(unittest.TestCase):
    """Check the Authorization header produced by the Basic Auth filter."""

    def _apply(self, auth_filter):
        """Run the filter over a fresh request; return the auth header."""
        request = http.Request('GET', 'http://example.net/')
        auth_filter(request)
        return request.headers['Authorization']

    def test_simple(self):
        header = self._apply(auth.BasicAuth('user', 'pass'))
        self.assertEqual(header, 'Basic dXNlcjpwYXNz')

    def test_unicode(self):
        # exercise bytes, text and mixed credential combinations
        lam_bytes = b'\xce\xbb'
        lam_text = lam_bytes.decode('utf-8')

        cases = (
            (auth.BasicAuth('user', lam_bytes), 'Basic dXNlcjrOuw=='),
            (auth.BasicAuth('user', lam_text), 'Basic dXNlcjrOuw=='),
            (auth.BasicAuth(lam_bytes, lam_text), 'Basic zrs6zrs='),
        )
        for auth_filter, expected in cases:
            self.assertEqual(self._apply(auth_filter), expected)
| Python | 0 | |
f6ea68d6a900eb33c2004aa65805b157b99c9ff8 | Remove beta. from hostname. | fabfile.py | fabfile.py | from fabric.api import *
from fabric.contrib.files import *
from fabric.colors import red
def deploy(branch='master'):
    "Deploy the specified branch to the remote host."
    # Layout: everything lives under www-data's home; the app is cloned
    # into django_app and served out of a dedicated virtualenv.
    root_dir = '/home/www-data'
    code_dir = '%s/django_app' % root_dir
    virtualenv_name = 'django_venv'
    virtualenv_dir = '%s/%s' % (root_dir, virtualenv_name)
    host = 'www.censusreporter.org'

    sudo('mkdir -p %s' % root_dir)
    sudo('chown www-data:www-data %s' % root_dir)

    # Install required packages
    sudo('apt-get update')
    sudo('apt-get install -y git')

    # Install and set up apache and mod_wsgi
    sudo('apt-get install -y apache2 libapache2-mod-wsgi')
    sudo('a2enmod wsgi')
    # Drop the stock default site and any stale copy of ours before
    # re-rendering the vhost from the template.
    sudo('rm -f /etc/apache2/sites-enabled/000-default')
    sudo('rm -f /etc/apache2/sites-enabled/%s' % host)
    sudo('rm -f /etc/apache2/sites-available/%s' % host)
    upload_template('./server/apache2/site', '/etc/apache2/sites-available/%s' % host, use_sudo=True, context={
        'domainname': host,
        'django_project_path': '%s/censusreporter' % code_dir,
        'django_static_path': '%s/censusreporter/apps/census/static' % code_dir,
        'django_venv_path': '%s/lib/python2.7/site-packages' % virtualenv_dir
    })
    sudo('a2ensite %s' % host)

    # Install up to virtualenv
    sudo('apt-get install -y python-setuptools')
    sudo('easy_install pip')
    sudo('pip install virtualenv')

    # Create virtualenv and add our django app to its PYTHONPATH
    sudo('virtualenv --no-site-packages %s' % virtualenv_dir)
    sudo('rm -f %s/lib/python2.7/site-packages/censusreporter.pth' % virtualenv_dir)
    append('%s/lib/python2.7/site-packages/censusreporter.pth' % virtualenv_dir, '%s/censusreporter' % code_dir, use_sudo=True)
    append('%s/lib/python2.7/site-packages/censusreporter.pth' % virtualenv_dir, '%s/censusreporter/apps' % code_dir, use_sudo=True)
    append('%s/bin/activate' % virtualenv_dir, 'export DJANGO_SETTINGS_MODULE="config.prod.settings"', use_sudo=True)

    # First deploy only: clone the repo if it is not present yet.
    with settings(warn_only=True):
        if sudo('test -d %s' % code_dir).failed:
            sudo('git clone git://github.com/censusreporter/censusreporter.git %s' % code_dir)
    with cd(code_dir):
        sudo('git pull origin %s' % branch)

    # Install pip requirements
    sudo('source %s/bin/activate && pip install -r requirements.txt' % virtualenv_dir)

    # Make sure everything is correctly owned
    sudo('chown www-data:www-data -R %s %s' % (code_dir, virtualenv_dir))

    # Restart apache
    sudo('service apache2 restart')
| from fabric.api import *
from fabric.contrib.files import *
from fabric.colors import red
def deploy(branch='master'):
"Deploy the specified branch to the remote host."
root_dir = '/home/www-data'
code_dir = '%s/django_app' % root_dir
virtualenv_name = 'django_venv'
virtualenv_dir = '%s/%s' % (root_dir, virtualenv_name)
host = 'beta.censusreporter.org'
sudo('mkdir -p %s' % root_dir)
sudo('chown www-data:www-data %s' % root_dir)
# Install required packages
sudo('apt-get update')
sudo('apt-get install -y git')
# Install and set up apache and mod_wsgi
sudo('apt-get install -y apache2 libapache2-mod-wsgi')
sudo('a2enmod wsgi')
sudo('rm -f /etc/apache2/sites-enabled/000-default')
sudo('rm -f /etc/apache2/sites-enabled/%s' % host)
sudo('rm -f /etc/apache2/sites-available/%s' % host)
upload_template('./server/apache2/site', '/etc/apache2/sites-available/%s' % host, use_sudo=True, context={
'domainname': host,
'django_project_path': '%s/censusreporter' % code_dir,
'django_static_path': '%s/censusreporter/apps/census/static' % code_dir,
'django_venv_path': '%s/lib/python2.7/site-packages' % virtualenv_dir
})
sudo('a2ensite %s' % host)
# Install up to virtualenv
sudo('apt-get install -y python-setuptools')
sudo('easy_install pip')
sudo('pip install virtualenv')
# Create virtualenv and add our django app to its PYTHONPATH
sudo('virtualenv --no-site-packages %s' % virtualenv_dir)
sudo('rm -f %s/lib/python2.7/site-packages/censusreporter.pth' % virtualenv_dir)
append('%s/lib/python2.7/site-packages/censusreporter.pth' % virtualenv_dir, '%s/censusreporter' % code_dir, use_sudo=True)
append('%s/lib/python2.7/site-packages/censusreporter.pth' % virtualenv_dir, '%s/censusreporter/apps' % code_dir, use_sudo=True)
append('%s/bin/activate' % virtualenv_dir, 'export DJANGO_SETTINGS_MODULE="config.prod.settings"', use_sudo=True)
with settings(warn_only=True):
if sudo('test -d %s' % code_dir).failed:
sudo('git clone git://github.com/censusreporter/censusreporter.git %s' % code_dir)
with cd(code_dir):
sudo('git pull origin %s' % branch)
# Install pip requirements
sudo('source %s/bin/activate && pip install -r requirements.txt' % virtualenv_dir)
# Make sure everything is correctly owned
sudo('chown www-data:www-data -R %s %s' % (code_dir, virtualenv_dir))
# Restart apache
sudo('service apache2 restart')
| Python | 0.000007 |
4cac86aeb2d24a916fc5ae9ca98e3898f4729e1c | add protocol.py module | plumbca/protocol.py | plumbca/protocol.py | # -*- coding: utf-8 -*-
"""
plumbca.protocol
~~~~~~~~~~~~~~~~
Implements the protocol support for Plumbca.
:copyright: (c) 2015 by Jason Lai.
:license: BSD, see LICENSE for more details.
"""
import logging
import asyncio
from .message import Request
from .worker import Worker
actlog = logging.getLogger('activity')
errlog = logging.getLogger('errors')
class PlumbcaCmdProtocol:
    """asyncio stream-server glue for the Plumbca command protocol.

    An instance owns a Worker and is intended as the client-connected
    callback of an asyncio stream server.
    """

    def __init__(self):
        # Worker executes the decoded commands.
        self.handler = Worker()

    async def plumbca_cmd_handle(self, reader, writer):
        """Simple plumbca command protocol implementation.

        plumbca_cmd_handle handles incoming command request.
        """
        # Reads until EOF -- assumes the client half-closes its side after
        # sending the full request (TODO confirm against the client code).
        data = await reader.read()
        req = Request(data)
        addr = writer.get_extra_info('peername')
        actlog.info("<Server> Received %r from %r", req.command, addr)
        # drive the command process
        resp = self.handler.run_command(req)
        # NOTE(review): `resp` is computed but the raw request arguments are
        # written back instead -- this looks like it should send `resp`;
        # confirm intended behaviour.
        writer.write(req.args)
        await writer.drain()
        actlog.info("Close the client %r socket", addr)
        writer.close()
| Python | 0.000001 | |
545af0493cf08cb15d262f3a5333df6d1fce6848 | Add util convenience functions for accessing data without decorators | brake/utils.py | brake/utils.py | from decorators import _backend
"""Access limits and increment counts without using a decorator."""
def get_limits(request, label, field, periods):
    """Return the accumulated rate-limit records for *request*.

    For the k-th entry of *periods* (1-based) the backend is queried with a
    count of 10 * k, exactly as the decorator-based API would do.
    """
    collected = []
    for idx, period in enumerate(periods, start=1):
        collected.extend(_backend.limit(
            label,
            request,
            field=field,
            count=10 * idx,
            period=period,
        ))
    return collected
def inc_counts(request, label, field, periods):
    """Increment the backend hit counter for *request* once per period."""
    for current_period in periods:
        _backend.count(label, request, field=field, period=current_period)
| Python | 0 | |
a88986fa441b84aa0c5da76e63a08154ef243fab | Add error codes | quic/errors.py | quic/errors.py | import enum
class Error(enum.Enum):
    """QUIC transport error codes (all in the 0x8000xxxx range).

    NOTE(review): several members share a numeric value --
    QUIC_STREAM_CANCELLED / QUIC_INVALID_RST_STREAM_DATA are both
    0x80000006, and QUIC_CLOSED_CRITICAL_STREAM /
    QUIC_INVALID_CONNECTION_CLOSE_DATA are both 0x80000007.  In a Python
    Enum the later names silently become aliases of the earlier ones, so
    they are not distinct members; confirm the intended code points
    against the QUIC specification.
    """
    # Connection has reached an invalid state.
    QUIC_INTERNAL_ERROR = 0x80000001
    # There were data frames after the a fin or reset.
    QUIC_STREAM_DATA_AFTER_TERMINATION = 0x80000002
    # Control frame is malformed.
    QUIC_INVALID_PACKET_HEADER = 0x80000003
    # Frame data is malformed.
    QUIC_INVALID_FRAME_DATA = 0x80000004
    # Multiple final offset values were received on the same stream
    QUIC_MULTIPLE_TERMINATION_OFFSETS = 0x80000005
    # The stream was cancelled
    QUIC_STREAM_CANCELLED = 0x80000006
    # A stream that is critical to the protocol was closed.
    QUIC_CLOSED_CRITICAL_STREAM = 0x80000007
    # The packet contained no payload.
    QUIC_MISSING_PAYLOAD = 0x80000030
    # STREAM frame data is malformed.
    QUIC_INVALID_STREAM_DATA = 0x8000002E
    # Received STREAM frame data is not encrypted.
    QUIC_UNENCRYPTED_STREAM_DATA = 0x8000003D
    # Received a frame which is likely the result of memory corruption.
    QUIC_MAYBE_CORRUPTED_MEMORY = 0x80000059
    # RST_STREAM frame data is malformed.
    # NOTE(review): alias of QUIC_STREAM_CANCELLED (same value).
    QUIC_INVALID_RST_STREAM_DATA = 0x80000006
    # CONNECTION_CLOSE frame data is malformed.
    # NOTE(review): alias of QUIC_CLOSED_CRITICAL_STREAM (same value).
    QUIC_INVALID_CONNECTION_CLOSE_DATA = 0x80000007
    # GOAWAY frame data is malformed.
    QUIC_INVALID_GOAWAY_DATA = 0x80000008
    # WINDOW_UPDATE frame data is malformed.
    QUIC_INVALID_WINDOW_UPDATE_DATA = 0x80000039
    # BLOCKED frame data is malformed.
    QUIC_INVALID_BLOCKED_DATA = 0x8000003A
    # PATH_CLOSE frame data is malformed.
    QUIC_INVALID_PATH_CLOSE_DATA = 0x8000004E
    # ACK frame data is malformed.
    QUIC_INVALID_ACK_DATA = 0x80000009
    # Version negotiation packet is malformed.
    QUIC_INVALID_VERSION_NEGOTIATION_PACKET = 0x8000000A
    # Public RST packet is malformed.
    QUIC_INVALID_PUBLIC_RST_PACKET = 0x8000000b
    # There was an error decrypting.
    QUIC_DECRYPTION_FAILURE = 0x8000000c
    # There was an error encrypting.
    QUIC_ENCRYPTION_FAILURE = 0x8000000d
    # The packet exceeded kMaxPacketSize.
    QUIC_PACKET_TOO_LARGE = 0x8000000e
    # The peer is going away.  May be a client or server.
    QUIC_PEER_GOING_AWAY = 0x80000010
    # A stream ID was invalid.
    QUIC_INVALID_STREAM_ID = 0x80000011
    # A priority was invalid.
    QUIC_INVALID_PRIORITY = 0x80000031
    # Too many streams already open.
    QUIC_TOO_MANY_OPEN_STREAMS = 0x80000012
    # The peer created too many available streams.
    QUIC_TOO_MANY_AVAILABLE_STREAMS = 0x8000004c
    # Received public reset for this connection.
    QUIC_PUBLIC_RESET = 0x80000013
    # Invalid protocol version.
    QUIC_INVALID_VERSION = 0x80000014
    # The Header ID for a stream was too far from the previous.
    QUIC_INVALID_HEADER_ID = 0x80000016
    # Negotiable parameter received during handshake had invalid value.
    QUIC_INVALID_NEGOTIATED_VALUE = 0x80000017
    # There was an error decompressing data.
    QUIC_DECOMPRESSION_FAILURE = 0x80000018
    # The connection timed out due to no network activity.
    QUIC_NETWORK_IDLE_TIMEOUT = 0x80000019
    # The connection timed out waiting for the handshake to complete.
    QUIC_HANDSHAKE_TIMEOUT = 0x80000043
    # There was an error encountered migrating addresses.
    QUIC_ERROR_MIGRATING_ADDRESS = 0x8000001a
    # There was an error encountered migrating port only.
    QUIC_ERROR_MIGRATING_PORT = 0x80000056
    # We received a STREAM_FRAME with no data and no fin flag set.
    QUIC_EMPTY_STREAM_FRAME_NO_FIN = 0x80000032
    # The peer received too much data, violating flow control.
    QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA = 0x8000003b
    # The peer sent too much data, violating flow control.
    QUIC_FLOW_CONTROL_SENT_TOO_MUCH_DATA = 0x8000003f
    # The peer received an invalid flow control window.
    QUIC_FLOW_CONTROL_INVALID_WINDOW = 0x80000040
    # The connection has been IP pooled into an existing connection.
    QUIC_CONNECTION_IP_POOLED = 0x8000003e
    # The connection has too many outstanding sent packets.
    QUIC_TOO_MANY_OUTSTANDING_SENT_PACKETS = 0x80000044
    # The connection has too many outstanding received packets.
    QUIC_TOO_MANY_OUTSTANDING_RECEIVED_PACKETS = 0x80000045
    # The QUIC connection has been cancelled.
    QUIC_CONNECTION_CANCELLED = 0x80000046
    # Disabled QUIC because of high packet loss rate.
    QUIC_BAD_PACKET_LOSS_RATE = 0x80000047
    # Disabled QUIC because of too many PUBLIC_RESETs post handshake.
    QUIC_PUBLIC_RESETS_POST_HANDSHAKE = 0x80000049
    # Disabled QUIC because of too many timeouts with streams open.
    QUIC_TIMEOUTS_WITH_OPEN_STREAMS = 0x8000004a
    # QUIC timed out after too many RTOs.
    QUIC_TOO_MANY_RTOS = 0x80000055
    # A packet was received with the wrong encryption level (i.e. it should have been
    # encrypted but was not.)
    QUIC_ENCRYPTION_LEVEL_INCORRECT = 0x8000002c
    # This connection involved a version negotiation which appears to have been
    # tampered with.
    QUIC_VERSION_NEGOTIATION_MISMATCH = 0x80000037
    # IP address changed causing connection close.
    QUIC_IP_ADDRESS_CHANGED = 0x80000050
    # Client address validation failed.
    QUIC_ADDRESS_VALIDATION_FAILURE = 0x80000051
    # Stream frames arrived too discontiguously so that stream sequencer
    # buffer maintains too many gaps.
    QUIC_TOO_MANY_FRAME_GAPS = 0x8000005d
    # Connection closed because server hit max number of sessions allowed.
    QUIC_TOO_MANY_SESSIONS_ON_SERVER = 0x80000060
| Python | 0.000002 | |
e0c3a46d1c3c13b5c956bf3cc6f30ad495f87ccd | put the logger config in a separate file for cleanliness | voglogger.py | voglogger.py | #!/usr/bin/python
"""
logger management for VOGLbot
writes out to both the console and a file 'voglbot.log'
"""
import sys
import logging
import time
# Configure the root logger to write everything (DEBUG+) to voglbot.log.
# NOTE: the original also passed stream=sys.stdout here; basicConfig does
# not accept both `filename` and `stream` (the stream is silently ignored
# on Python 2 and raises ValueError on Python 3).  Console output is
# provided by the explicit StreamHandler below instead.
logging.basicConfig(
    filename = 'voglbot.log',
    filemode = 'w',
    level=logging.DEBUG,
    format='%(asctime)s: %(message)s',
    datefmt = '%d-%m %H:%M:%S',
)

# for console logging: INFO and above, with level names, to stderr.
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)-12s : %(levelname)-8s %(message)s')
console.setFormatter(formatter)

# Module-level handle to the root logger, plus the console handler attached
# to it so records go to both the file and the console.
logger = logging.getLogger()
logging.getLogger('').addHandler(console)
| Python | 0 | |
a984120bdb6c67a3dc2ca89ce9ae5498230015ea | Add initial runner | hug/run.py | hug/run.py | """hug/run.py
Contains logic to enable execution of hug APIS from the command line
"""
from collections import namedtuple
from wsgiref.simple_server import make_server
import importlib
import importlib.machinery
import sys

import falcon
def server(module):
    """Build a falcon WSGI application from *module*'s HUG_API_CALLS.

    Each entry pairs a URL with a mapping of HTTP-method handler names to
    callables; the mapping is wrapped in a namedtuple so falcon can dispatch
    by attribute lookup on the routed resource.

    NOTE(review): `namedtuple` was used here without being imported anywhere
    in the file (fixed in the import block).  HUG_API_CALLS is iterated as
    (url, handlers) pairs -- if it is a dict this should be `.items()`;
    confirm against the hug decorator implementation.
    """
    api = falcon.API()
    for url, method_handlers in module.HUG_API_CALLS:
        api.add_route(url, namedtuple('Router', method_handlers.keys())(**method_handlers))
    return api
def terminal():
    """Command-line entry point: load the hug API file named in argv[1]
    and serve it with wsgiref on port 8000."""
    if len(sys.argv) < 2:
        print("Please specify a hug API file to start the server with", file=sys.stderr)
        # The original fell through here and crashed with an IndexError on
        # sys.argv[1]; exit with a failure status instead.
        sys.exit(1)
    # Module name is the filename without its extension.
    api = server(importlib.machinery.SourceFileLoader(sys.argv[1].split(".")[0], sys.argv[1]).load_module())
    httpd = make_server('', 8000, api)
    print("Serving on port 8000...")
    httpd.serve_forever()
| Python | 0.000003 | |
1578e1a129d91605148cf48f8793ac098ad0de7e | add command group | ibu/cli.py | ibu/cli.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import click
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group()
def ibu():
    # Root command group for the CLI; body intentionally empty -- click
    # groups only dispatch to attached subcommands.  (Comment rather than
    # docstring: a docstring would become the --help text.)
    pass
@click.command(context_settings=CONTEXT_SETTINGS)
def test():
    # Placeholder command that prints a greeting.
    # NOTE(review): created as a standalone click command, not attached to
    # the `ibu` group above (`@ibu.command` / `ibu.add_command` missing), so
    # `ibu test` will not find it -- confirm whether that is intended.
    print("hello")
| Python | 0.000004 | |
33dc091a43d3868324631fdb420721ab35d1f6ce | Create dis_q.py | dis_q.py | dis_q.py | #!/usr/bin/python
import pymqi
# --- WebSphere MQ connection parameters (Python 2 script, pymqi) ---------
queue_manager = "MQSD.TEST"
channel = "SYSTEM.DEF.SVRCONN"
host = "10.21.218.15"
port = "14123"
conn_info = "%s(%s)" % (host, port)

# Inquire every queue name; type filter below narrows the result set.
prefix = "*"
queue_type = pymqi.CMQC.MQQT_ALL
# queue_type = pymqi.CMQC.MQQT_LOCAL

# System/internal queues whose names start with these prefixes are skipped.
excluded_prefix = ['SYSTEM', 'MSB', 'AMQ' , 'MQAI']
# excluded_prefix = [ ]

args = {pymqi.CMQC.MQCA_Q_NAME: prefix,
        pymqi.CMQC.MQIA_Q_TYPE: queue_type}

qmgr = pymqi.connect(queue_manager, channel, conn_info)
pcf = pymqi.PCFExecute(qmgr)

try:
    response = pcf.MQCMD_INQUIRE_Q(args)
except pymqi.MQMIError, e:
    # "No objects matched" is an expected, benign outcome; anything else
    # is re-raised.
    if e.comp == pymqi.CMQC.MQCC_FAILED and e.reason == pymqi.CMQC.MQRC_UNKNOWN_OBJECT_NAME:
        print "No queues matched given arguments."
    else:
        raise
else:
    for queue_info in response:
        # Queue Name QueueDepth MaxDepth XMITQ Type
        # https://www-01.ibm.com/support/knowledgecenter/SSFKSJ_7.1.0/com.ibm.mq.javadoc.doc/WMQJavaClasses/com/ibm/mq/pcf/CMQC.html
        queue_name = queue_info[pymqi.CMQC.MQCA_Q_NAME]
        # NOTE(review): the generator variable `prefix` shadows the module
        # level `prefix`, and `queue_type` below shadows the filter constant;
        # both work but are confusing.
        if not any(queue_name.startswith(prefix) for prefix in excluded_prefix):
            queue_type = queue_info[pymqi.CMQC.MQIA_Q_TYPE]
            if queue_type == 1: #LOCAL
                queue_type = "LOCAL"
                queue_depth = queue_info[pymqi.CMQC.MQIA_CURRENT_Q_DEPTH]
                queue_mdepth = queue_info[pymqi.CMQC.MQIA_MAX_Q_DEPTH]
                print "%s \t %s \t %s \t %s" % (queue_name, queue_depth, queue_mdepth, queue_type)
            # elif queue_type == 2: #MODEL
            elif queue_type == 3: #ALIAS
                # Alias queues have no depth of their own.
                queue_type = "ALIAS"
                queue_depth = "-"
                queue_mdepth = "------"
                print "%s \t %s \t %s \t %s" % (queue_name, queue_depth, queue_mdepth, queue_type)
            elif queue_type == 6: #REMOTE
                queue_type = "REMOTE"
                queue_depth = "-"
                queue_mdepth = "------"
                print "%s \t %s \t %s \t %s" % (queue_name, queue_depth, queue_mdepth, queue_type)
                # print "%s \t %s" % (queue_name, queue_type)
            else:
                # Unhandled type (e.g. MODEL): print the raw numeric type.
                print "%s \t %s" % (queue_name, queue_type)
                # print "%s \t %s" % (queue_name, queue_type)

qmgr.disconnect()
| Python | 0.000139 | |
b7541c063b6fc10fdd622cbd680ea4418c679f6b | Add NodeList iterator | d1_libclient_python/src/d1_client/iter/node.py | d1_libclient_python/src/d1_client/iter/node.py | # -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Iterate over the nodes that are registered in a DataONE environment
For each Node in the environment, returns a PyXB representation of a DataONE
Node document.
https://releases.dataone.org/online/api-documentation-v2.0/apis/Types.html#Types.Node
"""
import d1_client.mnclient_1_1
import d1_client.mnclient_2_0
import d1_common.types.dataoneTypes_v1_1 as v1
import d1_common.types.dataoneTypes_v2_0 as v2
MAJOR_VERSION = 2
class NodeListIterator(object):
    """Iterate over the Nodes registered in a DataONE environment.

    Yields one PyXB Node object per registered node, sorted by node
    identifier.
    """

    def __init__(
        self,
        base_url,
        major_version=MAJOR_VERSION,
        client_dict=None,
        listNodes_dict=None,
    ):
        # base_url: base URL of the node whose listNodes() is queried.
        self._base_url = base_url
        # NOTE(review): major_version and listNodes_dict are stored but not
        # used by __iter__ (a v2 client is always created and listNodes() is
        # called with no arguments) -- confirm intent; the v1 client module
        # is imported at file level but never used.
        self._major_version = major_version
        self._client_dict = client_dict or {}
        self._listNodes_dict = listNodes_dict

    def __iter__(self):
        # Local import fixes a NameError: the original module called
        # logging.debug() without ever importing logging.
        import logging

        client = d1_client.mnclient_2_0.MemberNodeClient_2_0(
            self._base_url, **self._client_dict
        )
        node_list_pyxb = client.listNodes()
        logging.debug(
            'Retrieved {} Node documents'.format(len(node_list_pyxb.node))
        )
        # Sort by identifier for a deterministic iteration order.
        for node_pyxb in sorted(
            node_list_pyxb.node, key=lambda x: x.identifier.value()
        ):
            yield node_pyxb
| Python | 0.000001 | |
c00d49255187b6ac10c09f687bb8442f89b14142 | Fix and extend twisted.conch.ssh.keys.Key. Twisted's Key._fromString_PRIVATE_OPENSSH implementation supports only 3DES-CBC Our Key parses and loads the cipher specified in a DEK-Info header. Also, add Key.encrypt() and Key.decrypt() for general-use. | lib/crypto.py | lib/crypto.py | import re, sys
from hashlib import md5
from Crypto import Util
from Crypto.Cipher import AES, Blowfish, DES3
from Crypto.PublicKey import RSA, DSA
from pyasn1.codec.der import decoder as DERDecoder
from twisted.conch.ssh.keys import (
BadKeyError, EncryptedKeyError,
Key as _Key)
from twisted.conch.ssh import common
from twisted.conch.ssh.transport import _DummyCipher
from twisted.python.util import InsensitiveDict
from jersey import log
class Key(_Key):
    """twisted.conch Key subclass with raw encrypt/decrypt helpers and a
    replacement OpenSSH private-key parser.

    Twisted's own _fromString_PRIVATE_OPENSSH supports only 3DES-CBC; the
    parser here reads the cipher name from the PEM ``DEK-Info`` header and
    supports AES-128/192/256, Blowfish and 3DES as well.  The code targets
    Python 2 (``long``, ``str.decode("base64")``, old except syntax,
    integer ``/`` division).
    """

    def encrypt(self, plaintext):
        """Encrypt *plaintext* with the underlying PyCrypto key object,
        block by block (block size = keysize/8 bytes).

        PyCrypto's raw encrypt returns a one-element tuple, hence [0].
        """
        s = self.keyObject.size() / 8
        ciphertext = ""
        while plaintext:
            d, plaintext = plaintext[:s], plaintext[s:]
            ciphertext += self.keyObject.encrypt(d, None)[0]
        return ciphertext

    def decrypt(self, ciphertext):
        """Decrypt ciphertext produced by encrypt(), block by block.

        Ciphertext blocks are one byte longer than the plaintext blocks
        (keysize/8 + 1).
        """
        s = self.keyObject.size() / 8 + 1
        plaintext = ""
        while ciphertext:
            e, ciphertext = ciphertext[:s], ciphertext[s:]
            plaintext += self.keyObject.decrypt(e)
        return plaintext

    # PEM DEK-Info cipher name -> (PyCrypto cipher module, key length in bytes).
    _cipherMap = {
        'AES-256-CBC':(AES, 32),
        'AES-192-CBC':(AES, 24),
        'AES-128-CBC':(AES, 16),
        'BLOWFISH-CBC':(Blowfish, 16),
        '3DES-CBC':(DES3, 24),
        'DES-EDE3-CBC':(DES3, 24),
        'NONE':(_DummyCipher, 0),
        }

    # Key kinds accepted in the PEM BEGIN/END delimiter lines.
    _keyTypes = ("RSA", "DSA")
    _privateKeyBeginRE = re.compile(
        "^-----BEGIN ({0}) PRIVATE KEY-----$".format("|".join(_keyTypes)))

    @classmethod
    def _fromString_PRIVATE_OPENSSH(Class, data, passphrase):
        """
        Return a private key object corresponding to this OpenSSH private key
        string.  If the key is encrypted, passphrase MUST be provided.
        Providing a passphrase for an unencrypted key is an error.

        NOTE(review): the "passphrase for an unencrypted key" case described
        above is not actually checked by the code below.

        @type data: C{str}
        @type passphrase: C{str}
        @return: a C{Crypto.PublicKey.pubkey.pubkey} object
        @raises BadKeyError: if
            * a passphrase is provided for an unencrypted key
            * a passphrase is not provided for an encrypted key
            * the ASN.1 encoding is incorrect
        """
        kind, headers, encKey = Class._parsePrivateKey(data)
        if headers.get("Proc-Type") == "4,ENCRYPTED":
            if not passphrase:
                raise EncryptedKeyError("encrypted key with no passphrase")
            cipher, iv = Class._parseDekInfo(headers["DEK-Info"])
            keyData = Class._decryptPrivateKey(cipher, iv, passphrase,
                    encKey.decode("base64"))
        else:
            keyData = encKey.decode("base64")
        decodedKey = Class._decodeKey(keyData)
        key = Class._buildKey(kind, decodedKey)
        return key

    @classmethod
    def _parsePrivateKey(Class, data):
        """Split a PEM private key into (key type, header dict, base64 body).

        The format of an OpenSSH private key string is::
            -----BEGIN <key type> PRIVATE KEY-----
            [Proc-Type: 4,ENCRYPTED
            DEK-Info: <cipher>,<initialization value>]
            <base64-encoded ASN.1 structure>
            ------END <key type> PRIVATE KEY------
        """
        lines = data.split("\n")
        keyType = Class._parseKeyType(lines)
        headers = Class._parseHeaders(lines)
        keyData = Class._parseKeyData(lines, keyType)
        return keyType, headers, keyData

    @classmethod
    def _parseKeyType(Class, lines):
        """Consume lines until a BEGIN delimiter is found; return the key
        type ("RSA" or "DSA") or raise BadKeyError."""
        keyType = None
        while lines and keyType is None:
            line = lines.pop(0)
            m = Class._privateKeyBeginRE.match(line)
            if m:
                keyType = m.groups()[0]
        if not keyType:
            raise BadKeyError("No private key found.")
        return keyType

    @classmethod
    def _parseHeaders(Class, lines):
        """Consume RFC-1421-style "Key: value" header lines (with backslash
        line continuation) and return them as a case-insensitive dict.

        A non-header line is pushed back onto *lines* and parsing stops.
        """
        headers = InsensitiveDict()
        moreHeaders = True
        key, val = None, None
        while lines and moreHeaders:
            line = lines.pop(0)
            if line == "":  # end of headers
                moreHeaders = False
                if key and val:  # save header
                    headers[key] = val
                    key, val = None, None
            else:
                if key and val:  # already parsing a header
                    val += line
                else:  # new header
                    try:
                        key, val = line.split(":", 1)
                        val = val.lstrip()
                    except ValueError:
                        # Not a header line: restore it and stop.
                        lines.insert(0, line)
                        return headers
                if val.endswith("\\"):  # header will be continued
                    val = val[:-1]  # strip trailing escape char
                else:  # save header
                    headers[key] = val
                    key, val = None, None
        return headers

    @classmethod
    def _parseKeyData(Class, lines, keyType):
        """Concatenate body lines up to the matching END delimiter and
        return the base64 text; raise BadKeyError on a missing END or an
        empty body."""
        endToken = "-----END {0} PRIVATE KEY-----".format(keyType)
        keyData = ""
        moreKey = True
        while lines and moreKey:
            line = lines.pop(0)
            if line == endToken:
                moreKey = False
            else:
                keyData += line
        if moreKey:
            raise BadKeyError("No END delimeter found")
        if not keyData:
            raise BadKeyError("No private key data found")
        return keyData

    @classmethod
    def _buildKey(Class, kind, keyData):
        # Dispatch to _buildPrivateKey_RSA / _buildPrivateKey_DSA by kind.
        buildKey = getattr(Class, "_buildPrivateKey_{0}".format(kind))
        return buildKey(keyData)

    @classmethod
    def _buildPrivateKey_RSA(Class, decodedKey):
        """Construct an RSA Key from a decoded ASN.1 sequence.

        The ASN.1 structure of a RSA key is::
            (0, n, e, d, p, q)
        """
        if len(decodedKey) == 2:  # alternate RSA key
            decodedKey = decodedKey[0]
        if len(decodedKey) < 6:
            raise BadKeyError('RSA key failed to decode properly')
        n, e, d, p, q = [long(value) for value in decodedKey[1:6]]
        if p > q:  # make p smaller than q
            p, q = q, p
        return Class(RSA.construct((n, e, d, p, q)))

    @classmethod
    def _buildPrivateKey_DSA(Class, decodedKey):
        """Construct a DSA Key from a decoded ASN.1 sequence.

        The ASN.1 structure of a DSA key is::
            (0, p, q, g, y, x)

        NOTE(review): the length check below runs *after* the unpacking
        that depends on it, so a short sequence raises before the intended
        BadKeyError -- confirm and reorder if desired.
        """
        p, q, g, y, x = [long(value) for value in decodedKey[1: 6]]
        if len(decodedKey) < 6:
            raise BadKeyError('DSA key failed to decode properly')
        return Class(DSA.construct((y, g, p, q, x)))

    @classmethod
    def _parseDekInfo(Class, dekInfo):
        # "DEK-Info: <cipher>,<hex iv>" -> (cipher name, raw iv bytes).
        cipher, ivData = dekInfo.split(',')
        iv = ivData.decode("hex")
        return cipher, iv

    @classmethod
    def _decryptPrivateKey(Class, cipherName, iv, passphrase, data):
        """Decrypt the PEM body with the named CBC cipher and a key derived
        from the passphrase; return the unpadded ASN.1 bytes."""
        cipher, keySize = Class._cipherMap[cipherName.upper()]
        decKey = Class._buildDecryptKey(passphrase, iv, keySize)
        c = cipher.new(decKey, cipher.MODE_CBC, iv[:cipher.block_size])
        keyData = c.decrypt(data)
        return Class._trimPadding(keyData)

    @classmethod
    def _trimPadding(Class, keyData):
        # PKCS#7-style: the final byte gives the pad length to strip.
        removeLen = ord(keyData[-1])
        return keyData[:-removeLen]

    @classmethod
    def _buildDecryptKey(Class, passphrase, iv, keyLen=16, ivLen=8):
        """Derive *keyLen* key bytes from passphrase + iv by iterated MD5
        (OpenSSL's EVP_BytesToKey-style derivation -- TODO confirm exact
        compatibility for all parameter combinations)."""
        d = md5()
        dk = ""
        while len(dk) < keyLen:
            d.update(passphrase)
            d.update(iv[:ivLen])
            t = d.digest()
            dk += t
            d.update(t)
        return dk[:keyLen]

    @classmethod
    def _decodeKey(self, encoded):
        # DER-decode the key bytes; any failure becomes BadKeyError.
        # NOTE(review): the first parameter is named `self` but this is a
        # classmethod (it receives the class).
        try:
            return DERDecoder.decode(encoded)[0]
        except Exception, e:
            raise BadKeyError('something wrong with decode')
| Python | 0 | |
2c900f8bddc9efb40d900bf28f8c6b3188add71e | Disable trix parser tests with Jython | test/test_trix_parse.py | test/test_trix_parse.py | #!/usr/bin/env python
from rdflib.graph import ConjunctiveGraph
import unittest
class TestTrixParse(unittest.TestCase):
    """Parse the bundled TriX fixture files with rdflib and check counts."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testAperture(self):
        """A fixture with four named graphs and 24 triples in total."""
        g = ConjunctiveGraph()
        g.parse("test/trix/aperture.trix", format="trix")
        c = list(g.contexts())
        # Total triples across all contexts (each context supports len()).
        t = sum(map(len, g.contexts()))
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(t, 24)
        self.assertEqual(len(c), 4)

    def testSpec(self):
        """Smoke test: the spec example must parse without error."""
        g = ConjunctiveGraph()
        g.parse("test/trix/nokia_example.trix", format="trix")

    def testNG4j(self):
        """Smoke test: the NG4J example must parse without error."""
        g = ConjunctiveGraph()
        g.parse("test/trix/ng4jtest.trix", format="trix")


# Skip this whole module when running under Jython: its SAX parser lacks
# the start_namespace_decl hook the TriX parser needs.
import platform
if platform.system() == 'Java':
    from nose import SkipTest
    raise SkipTest('Jython issues - "JavaSAXParser" object has no attribute "start_namespace_decl"')

if __name__=='__main__':
    unittest.main()
| #!/usr/bin/env python
from rdflib.graph import ConjunctiveGraph
import unittest
class TestTrixParse(unittest.TestCase):
    """Parse the bundled TriX fixture files with rdflib and check counts."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testAperture(self):
        # Fixture with multiple named graphs: check triple and context counts.
        g=ConjunctiveGraph()
        g.parse("test/trix/aperture.trix",format="trix")
        c=list(g.contexts())
        #print list(g.contexts())
        t=sum(map(len, g.contexts()))
        self.assertEquals(t,24)
        self.assertEquals(len(c),4)
        #print "Parsed %d triples"%t
    def testSpec(self):
        # Smoke test: must parse without raising; no assertions.
        g=ConjunctiveGraph()
        g.parse("test/trix/nokia_example.trix",format="trix")
        #print "Parsed %d triples"%len(g)

if __name__=='__main__':
    unittest.main()
| Python | 0 |
0cb6474b8c02f2cb7af54f8321f82a53175e8345 | check for globals in the lib that are not prefixed with toku. addresses #74 | src/tokuglobals.py | src/tokuglobals.py | #!/usr/bin/python
import sys
import os
import re
def checkglobals(libname, exceptsymbols, verbose):
    """Scan the output of ``nm -g libname`` and return 1 if any exported
    symbol is neither prefixed with ``toku_`` nor listed in *exceptsymbols*,
    else 0.  Lines matching neither expected nm format are reported as
    "unknown" and also count as failures.  (Python 2 script.)
    """
    badglobals = 0
    nmcmd = "nm -g " + libname
    f = os.popen(nmcmd)
    b = f.readline()
    while b != "":
        # Defined symbols: "<hex address> <type char> <symbol name>".
        match = re.match("^([0-9a-f]+)\s(.?)\s(.*)$", b)
        if match == None:
            # Presumably undefined symbols, listed with leading whitespace;
            # these are silently skipped.
            match = re.match("^\s+(.*)$", b)
            if match == None:
                print "unknown", b
                badglobals = 1
        else:
            type = match.group(2)
            symbol = match.group(3)
            if verbose: print type, symbol
            # Flag any exported symbol without the toku_ prefix that is not
            # explicitly whitelisted.
            match = re.match("^toku_", symbol)
            if match == None and not exceptsymbols.has_key(symbol):
                print "non toku symbol=", symbol
                badglobals = 1
        b = f.readline()
    f.close()
    return badglobals
def main():
    """Parse -v flags and check libdb.so for non-toku exported globals.

    Returns checkglobals()'s flag (0 = clean, 1 = bad symbols found),
    suitable as a process exit status.
    """
    verbose = 0
    for arg in sys.argv[1:]:
        if arg == "-v":
            verbose += 1
    # Symbols allowed to be exported without the toku_ prefix.
    exceptsymbols = {}
    # Standard linker/runtime symbols.
    for n in [ "_init", "_fini", "_end", "_edata", "__bss_start" ]:
        exceptsymbols[n] = 1
    # Public Berkeley-DB-compatible API entry points.
    for n in [ "db_env_create", "db_create", "db_strerror", "db_version", "log_compare" ]:
        exceptsymbols[n] = 1
    return checkglobals("libdb.so", exceptsymbols, verbose)

sys.exit(main())
| Python | 0.00001 | |
24b8437003269ebd10c46d0fbdaa3e432d7535d6 | Add VCF -> non-reference likelihood table script. | genotype-likelihoods.py | genotype-likelihoods.py | from __future__ import print_function
import sys
import cyvcf
from argparse import ArgumentParser, FileType
import toolz as tz
description = ("Create a table of probability of a non reference call for each "
               "genotype for each sample. This is PL[0]. -1 is output for samples "
               "with a missing PL call at a position.")
parser = ArgumentParser(description=description)
parser.add_argument("vcf", type=FileType('r'),
                    help="VCF file to convert, use '-' to read from stdin")
args = parser.parse_args()

vcf_reader = cyvcf.Reader(args.vcf)
# NOTE(review): only the first 10 records and samples[1:5] are processed --
# these look like leftover debugging limits; confirm before real use.
records = tz.take(10, vcf_reader)
samples = vcf_reader.samples[1:5]

# Header row: fixed VCF columns followed by one column per selected sample.
header = "\t".join([str(x) for x in ["CHROM", "POS", "ID", "REF", "ALT"] + samples])
print(header, file=sys.stdout)
for record in records:
    line = [record.CHROM, record.POS, record.ID, record.REF, record.alleles[1]]
    # Per the description above: output PL[0] per sample, or -1 when the
    # sample has no PL value at this position.
    pls = [x.data.get("PL", None) for x in record.samples[1:5]]
    pls = [x[0] if x else "-1" for x in pls]
    print("\t".join([str(x) for x in line + pls]), file=sys.stdout)
| Python | 0 | |
6a9ddbf5d775df14c994c9af9e89195ca05a58f9 | Add pyjokes CLI test | tests/test_cli_error.py | tests/test_cli_error.py |
import pytest
import subprocess
from subprocess import Popen, PIPE
def test_pyjokes_call_exception():
    """subprocess.check_call('pyjokes') must raise CalledProcessError."""
    # Use the context-manager form: the string-expression form of
    # pytest.raises was deprecated and later removed from pytest.
    with pytest.raises(subprocess.CalledProcessError):
        subprocess.check_call('pyjokes')
def test_pyjokes_call_output():
    """Running ``pyjokes`` prints the 'Did you mean pyjoke?' hint, exit 1."""
    # The original wrapped Popen in try/except and put the assertions inside
    # the except block, so on success nothing was checked at all, and on
    # failure the block referenced an unbound ``p``.  Run the process and
    # assert unconditionally instead.
    p = subprocess.Popen('pyjokes', stdin=PIPE, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    assert out == b'Did you mean pyjoke?'
    assert p.returncode == 1
83a4c9bfa64543ecda65ed4c916fad8ad0a9233d | Create markov.py | markov.py | markov.py | # -*- coding: utf-8 -*-
import random
def ngram(text, n):
    """Return every length-*n* contiguous slice of *text*, in order.

    (Was a named lambda using the Python-2-only ``xrange``; ``def`` with
    ``range`` is PEP 8 compliant and works on both Python 2 and 3.)
    """
    return [text[i:i + n] for i in range(len(text) - n + 1)]


def flatten2D(data):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return [flattened for inner in data for flattened in inner]


def randelement(x):
    """Return a uniformly random element of the non-empty sequence *x*."""
    return x[random.randint(0, len(x) - 1)]
class Markov:
    """Markov-chain generator over a collection of pre-built n-grams.

    *data* is expected to be a sequence of n-grams (length-``n`` sequences,
    e.g. produced by ``ngram``) -- TODO confirm with callers.
    """

    def __init__(self, data, n):
        self.data = data
        self.n = n

    def markov(self, limit, firstword, lastword, getlength, lengthlimit=None, result=None):
        """Recursively chain up to *limit* n-grams and return the flattened
        result (head of each n-gram plus the whole final n-gram).

        Starts from an n-gram whose first element is *firstword*; stops
        early when the chain ends with *lastword* or when *getlength* of
        the flattened chain reaches *lengthlimit*.
        """
        if limit == 0:
            # Flatten: first element of each n-gram, then the final n-gram.
            return [k for k in [i[0] for i in result[:-1]] + result[-1]]
        candidatelist = []
        if result != None:
            # Continuations: n-grams whose prefix overlaps the tail of the
            # previously chosen n-gram.
            candidatelist = [candidate for candidate in self.data if result[-1][1:self.n] == candidate[0:self.n - 1]]
        else:
            result = []
            candidatelist = [candidate for candidate in self.data if candidate[0] == firstword]
        if candidatelist == []:
            # No valid continuation: fall back to a random n-gram.
            result.append(randelement(self.data))
        else:
            result.append(randelement(candidatelist))
        wordcount = getlength([k for k in [i[0] for i in result[:-1]] + result[-1]])
        charlimitflag = lengthlimit == None or wordcount < lengthlimit
        if not charlimitflag:
            # Over the length budget: drop the n-gram just added and finish.
            # NOTE(review): if this empties `result`, the result[-1] access
            # below raises IndexError -- confirm inputs rule that out.
            result = result[:-1]
        mrkv = lambda li: self.markov(li, firstword, lastword, getlength, lengthlimit, result)
        return mrkv(limit - 1) if charlimitflag and result[-1][-1] != lastword else mrkv(0)
| Python | 0.000001 | |
0970115f9bc1bab019c23ab46e64b26d5e754313 | Implement function for displaying tuning guidance on a DIY 8-segment LEDs display | led_display.py | led_display.py | import math
from gpiozero import LED
from time import sleep
# Two 7-segment digits driven through gpiozero:
#   suffix 0 = digit showing the string/pitch letter,
#   suffix 1 = digit showing the tuning-direction glyph.
# Names follow the standard a-g segment labels; numbers are the GPIO pins.
g0 = LED(12)
f0 = LED(16)
a0 = LED(20)
b0 = LED(21)
e0 = LED(17)
d0 = LED(27)
c0 = LED(22)

g1 = LED(25)
f1 = LED(24)
a1 = LED(23)
b1 = LED(18)
e1 = LED(5)
d1 = LED(6)
c1 = LED(13)

# Note name -> (first tuple driven with .off(), second with .on()) on digit 0
# by display_tuning_guidance(); the display is presumably wired active-low
# (common anode), so .off() lights a segment -- TODO confirm on hardware.
# 'E2' and 'E4' share the glyph 'E', hence identical entries.
PITCHES = {
    'E2': ((a0, d0, e0, f0, g0), (b0, c0)),
    'A2': ((a0, b0, c0, e0, f0, g0), (d0, )),
    'D3': ((b0, c0, d0, e0, g0), (a0, f0,)),
    'G3': ((a0, b0, c0, d0, f0, g0), (e0, )),
    'B3': ((c0, d0, e0, f0, g0), (a0, b0,)),
    'E4': ((a0, d0, e0, f0, g0), (b0, c0)),
}

# Direction key (-1 / 0 / +1; sign convention not shown here -- TODO confirm
# which means flat vs sharp) -> segment tuples for digit 1, same ordering.
DIRECTIONS = {
    -1: ((a1, b1, f1, g1), (c1, d1, e1,)),
    0: ((g1, ), (a1, b1, c1, d1, e1, f1, )),
    1: ((c1, d1, e1, g1), (a1, b1, f1)),
}
def display_tuning_guidance(pitch, direction):
    """Render *pitch* on digit 0 and *direction* on digit 1.

    The first tuple of each table entry is driven with .off() and the
    second with .on() -- presumably the segments are wired active-low
    (common anode), so .off() lights them; confirm against the hardware.
    """
    pitch_lit, pitch_dark = PITCHES[pitch]
    dir_lit, dir_dark = DIRECTIONS[direction]
    for segment in pitch_lit + dir_lit:
        segment.off()
    for segment in pitch_dark + dir_dark:
        segment.on()
| Python | 0 | |
class SubmodularPick(object):
    """Class for submodular pick"""

    def __init__(self,
                 explainer,
                 data,
                 predict_fn,
                 method='sample',
                 sample_size=1000,
                 num_exps_desired=5,
                 num_features=10,
                 **kwargs):
        """Saves a representative sample of explanation objects using SP-LIME,
        as well as saving all generated explanations

        First, a collection of candidate explanations are generated
        (see explain_instance). From these candidates, num_exps_desired are
        chosen using submodular pick. (see marcotcr et al paper).

        Args:
            data: a numpy array where each row is a single input into predict_fn
            predict_fn: prediction function. For classifiers, this should be a
                    function that takes a numpy array and outputs prediction
                    probabilities. For ScikitClassifiers, this is
                    `classifier.predict_proba()`. For ScikitRegressors, this
                    is `regressor.predict()`.
            method: The method to use to generate candidate explanations
                    method == 'sample' will sample the data uniformly at
                    random. The sample size is given by sample_size. Otherwise
                    if method == 'full' then explanations will be generated for the
                    entire data.
            sample_size: The number of instances to explain if method == 'sample'
            num_exps_desired: The number of explanation objects returned
            num_features: maximum number of features present in explanation

        Raises:
            TypeError: if num_exps_desired cannot be converted to an integer.

        Sets value:
            sp_explanations: A list of explanation objects that has a high coverage
            explanations: All the candidate explanations saved for potential future use.
        """
        # Parse args
        if method == 'sample':
            if sample_size > len(data):
                warnings.warn("""Requested sample size larger than
                              size of input data. Using all data""")
                sample_size = len(data)
            all_indices = np.arange(len(data))
            np.random.shuffle(all_indices)
            sample_indices = all_indices[:sample_size]
        elif method == 'full':
            sample_indices = np.arange(len(data))

        # Generate candidate explanations, one per selected row.
        self.explanations = []
        for i in sample_indices:
            self.explanations.append(
                explainer.explain_instance(
                    data[i], predict_fn, num_features=num_features))

        # Error handling.  The original code *returned* a string from
        # __init__ here, which itself raises a bare
        # "TypeError: __init__() should return None"; raise an explicit,
        # descriptive TypeError instead (same exception type for callers).
        try:
            num_exps_desired = int(num_exps_desired)
        except TypeError:
            raise TypeError("Requested number of explanations should be an integer")
        if num_exps_desired > len(self.explanations):
            warnings.warn("""Requested number of explanations larger than
                          total number of explanations, returning all
                          explanations instead.""")
        num_exps_desired = min(num_exps_desired, len(self.explanations))

        # Find all the explanation model features used. Defines the dimension d'
        features_dict = {}
        feature_iter = 0
        for exp in self.explanations:
            for feature, _ in exp.as_list():
                if feature not in features_dict.keys():
                    features_dict[feature] = (feature_iter)
                    feature_iter += 1
        d_prime = len(features_dict.keys())

        # Create the n x d' dimensional 'explanation matrix', W
        W = np.zeros((len(self.explanations), d_prime))
        for i, exp in enumerate(self.explanations):
            for feature, value in exp.as_list():
                W[i, features_dict[feature]] = value

        # Create the global importance vector, I_j described in the paper
        importance = np.sum(abs(W), axis=0)**.5

        # Now run the SP-LIME greedy algorithm: repeatedly add the candidate
        # that maximises the importance-weighted feature coverage.
        remaining_indices = set(range(len(self.explanations)))
        V = []
        for _ in range(num_exps_desired):
            best = 0
            best_ind = None
            current = 0
            for i in remaining_indices:
                current = np.dot(
                        (np.sum(abs(W)[V + [i]], axis=0) > 0), importance
                        )  # coverage function
                if current >= best:
                    best = current
                    best_ind = i
            V.append(best_ind)
            remaining_indices -= {best_ind}

        self.sp_explanations = [self.explanations[i] for i in V]
| Python | 0 | |
550d8bcd49e5ec591286f3f42de7dd54ef853bb8 | Add a utility script to print duplicates | find_dupes.py | find_dupes.py | #!/usr/bin/env python3
import json
import os
import random

# Gather every quote object from the JSON files in ./data, report the number
# of distinct quotes/authors, then print every duplicated quote sorted by text.
scriptpath = os.path.dirname(__file__)
data_dir = os.path.join(scriptpath, 'data')

all_json = []
for entry in os.listdir(data_dir):
    if os.path.isfile(os.path.join(data_dir, entry)):
        all_json.append(entry)

quotes = []
for entry in all_json:
    with open(os.path.join(data_dir, entry)) as json_data:
        quotes.extend(json.load(json_data)['data'])

uniq_quotes = set()
uniq_authors = set()
for quote in quotes:
    uniq_quotes.add(quote['quote'])
    uniq_authors.add(quote['author'])
print('Unique quotes: {}, authors: {}'.format(len(uniq_quotes), len(uniq_authors)))

# An entry is a dupe once its quote text has already been seen.
seen = set()
dupes = []
for item in quotes:
    if item['quote'] in seen:
        dupes.append(item)
    else:
        seen.add(item['quote'])
dupes.sort(key=lambda item: item['quote'])
print(*dupes, sep='\n')
a02b2866a3bf6067a2ee7f6d194c52c0a4d4500e | Create welcome_email_daemon.py | welcome_email_daemon.py | welcome_email_daemon.py | #send new members a welcome email
from smtplib import SMTP as smtp
from time import sleep
def welcome_bot():
    """Send a welcome mail to every address in 'busters' not yet welcomed.

    Newly greeted addresses are appended to 'welcomed' so each address is
    only mailed once across runs.
    """
    with open('busters', 'r') as fp, open('welcomed', 'a') as np:
        for raw in fp:
            email = raw.strip()
            if not is_in(email):
                send_welcome(email)
                np.write(email + '\n')
def is_in(email):
    """Return True if *email* (one address per line) already appears in 'welcomed'."""
    # Bug fix: the original placed mp.close() after the return statement,
    # making it unreachable and leaking the file handle.  A context manager
    # guarantees the close; returning on the first hit also avoids scanning
    # the rest of the file.
    with open('welcomed', 'r') as mp:
        for eachline in mp:
            if eachline.strip() == email:
                return True
    return False
def send_welcome(email):
    """Send the canned Food Coop welcome mail to *email* via the local SMTP
    server, logging refused recipients to the 'welcome_errors' file.
    """
    FROM = 'customer_services@my_domain.com'
    TO = email
    # NOTE: the backslash-continued lines below are part of one string
    # literal and must stay at column 0.
    BODY_success = "\r\nThankyou for joining the Food Coop! To make an order go to www.my_website.com\r\n\
Pick the items you want and copy-paste the code to customer_services@my_domain.com with the \
subject line of the email set to 'food' (all lower-case letters and without the quotation marks)\r\n\r\n\
If your order is successful you'll receive a confirmation email from the Food Coop within 5 minutes \
of you sending in your order\r\n\r\n\
Pickup is on Wednesday on Mars (on the first floor of the Food Department. We will put signs up \
on the day) from 12 to 3pm. See you there!\r\n\r\nThe Food Coop Team\r\n(automated email. \
write to customer_services@my_domain.com if you're having trouble)\r\n"
    SUBJECT_success = "Food Coop membership"
    # Raw RFC-822 style message assembled by hand (headers + blank line + body).
    message = 'From: ' + FROM + '\r\nTo: ' + TO + '\r\nSubject: ' + SUBJECT_success + '\r\n\r\n' + BODY_success
    SMTPSERVER = 'localhost'
    sendserver = smtp(SMTPSERVER)
    # smtplib's sendmail returns a dict of refused recipients; iterating it
    # below yields the refused addresses (the dict keys).
    errors = sendserver.sendmail(FROM, TO, message)
    sendserver.quit()
    if len(errors) != 0:
        lp = open('welcome_errors', 'a')
        for eachline in errors:
            lp.write(eachline+'\n')
        lp.write('\n\n')
        lp.close()
# Daemon loop: re-scan the sign-up list every 10 seconds, forever.
while True:
    sleep(10)
    welcome_bot()
| Python | 0.000122 | |
a892a389cfc94ebf72579ed6888c02463cdf7e6d | add moviepy - text_erscheinen_lassen_rechts2links.py | moviepy/text_erscheinen_lassen_rechts2links.py | moviepy/text_erscheinen_lassen_rechts2links.py | #!/usr/bin/env python
# Create a video with text, making the text appear from right to left
# Settings
text = 'Text' # text to render
textgroesse = 150 # text size in pixels
textfarbe_r = 0 # text colour, red component
textfarbe_g = 0 # text colour, green component
textfarbe_b = 0 # text colour, blue component
schrift = 'FreeSans' # font family
winkel = 0 # rotation angle
hgfarbe_r = 1 # background colour, red component
hgfarbe_g = 1 # background colour, green component
hgfarbe_b = 1 # background colour, blue component
videobreite = 1280 # video width in pixels
videohoehe = 720 # video height in pixels
videolaenge = 5 # video length in seconds
videodatei = 'text.ogv' # output video file
frames = 25 # frames per second
# Import the moviepy module
from moviepy.editor import *
# Import the gizeh module
import gizeh
# Function that builds one frame; t is the time (in seconds) of that frame
def create_frame(t):
    """Render the frame at time t: the centred text plus a background-coloured
    rectangle whose centre slides left with t, uncovering the text from the
    right edge towards the left."""
    img = gizeh.Surface(videobreite,videohoehe,bg_color=(hgfarbe_r,hgfarbe_g,hgfarbe_b))
    text_img = gizeh.text(text, fontfamily=schrift, fontsize=textgroesse,
                          fill=(textfarbe_r,textfarbe_g,textfarbe_b),
                          xy=(videobreite/2,videohoehe/2), angle=winkel)
    rect_img = gizeh.rectangle(lx=videobreite, ly=videohoehe, xy=(videobreite/2-t*videobreite/videolaenge,videohoehe/2), fill=(hgfarbe_r,hgfarbe_g,hgfarbe_b), angle=winkel)
    text_img.draw(img)
    rect_img.draw(img)
    return img.get_npimage()
# Create the video
video = VideoClip(create_frame, duration=videolaenge)
# Write the video file
video.write_videofile(videodatei, fps=frames)
# moviepy help: https://zulko.github.io/moviepy/index.html
# gizeh help: https://github.com/Zulko/gizeh
# text_erscheinen_lassen_rechts2links.py
# License: http://creativecommons.org/publicdomain/zero/1.0/
# Author: openscreencast.de
| Python | 0.000006 | |
5db0ef459f4b0f0d3903578ae89bef7d0de7bf98 | add terminal test file | termtest.py | termtest.py | #!/usr/bin/python3
import termbox
# Fill the entire terminal with a single character via the termbox API,
# cycling through the letters 'a'..'z' (one full-screen repaint per letter).
t = termbox.Termbox()
t.clear()
width = t.width()
height = t.height()
cell_count = width * height  # NOTE(review): computed but never used
char = ord('a')
for c in range(1):  # NOTE(review): outer loop runs exactly once as written
    for i in range(26):
        for y in range(height):
            for x in range(width):
                t.change_cell(x, y, char, termbox.WHITE, termbox.BLACK)
        t.present()  # flush the back buffer to the screen
        char += 1
t.close()
| Python | 0.000001 | |
3d8667d2bfd75fe076b15b171e5c942a2a358508 | add basic is_unitary tests | test_gate.py | test_gate.py | import numpy as np
import unittest
import gate
class TestGate(unittest.TestCase):
    """Unit tests for gate.QuantumGate unitarity validation."""
    def test_is_unitary(self):
        # Pauli-X is unitary: construction succeeds and is_unitary() holds.
        # NOTE(review): np.matrix and np.complex_ are deprecated/removed in
        # recent NumPy; consider np.array(..., dtype=complex) going forward.
        qg = gate.QuantumGate(np.matrix('0 1; 1 0', np.complex_))
        self.assertTrue(qg.is_unitary())
    def test_is_not_unitary(self):
        # Constructing a gate from a non-unitary matrix must raise.
        matrix = np.matrix('1 1; 1 0', np.complex_)
        # Bug fix: failUnlessRaises is a long-deprecated alias (removed in
        # modern unittest); use assertRaises.
        self.assertRaises(Exception, gate.QuantumGate, matrix)
if __name__ == '__main__':
    unittest.main()
| Python | 0.000017 | |
bf0a4ee5023cddd4072330e9a3e5a530aeea956e | test unit added | test_unit.py | test_unit.py | class test_output:
    def run(self, queue):
        """Consume items from *queue* forever and print each one.

        Blocks on queue.get(); presumably run on its own thread/process by
        the host - confirm against the loader.
        """
        while True:
            item = queue.get()
            print(item)
def mod_init():
    """Factory entry point: return a fresh test_output instance.

    NOTE(review): presumably called by the plugin host to instantiate this
    unit - confirm against the module loader.
    """
    return test_output()
| Python | 0 | |
33e2f5a0a11d5474b7a9f1ad3989575831f448ee | Add initial version of 'testbuild.py'. Currently this tests compilation of the CRYENGINE repo in win_x86/profile mode. Installed VS versions are discovered by querying the registry. Settings are in the script itself in the USED_* variables (to be abstracted later). Support for additional platforms and configs will be added later. | testbuild.py | testbuild.py | import os
import platform
import subprocess
# Group these here for transparency and easy editing.
USED_REPOSITORY = 'CRYENGINE'  # repo under github.com/CRYTEK-CRYENGINE to build
USED_TARGET = 'win_x86'        # build target (key into TARGET_TO_SLN_TAG)
USED_CONFIG = 'Profile'        # MSBuild configuration name
USED_BRANCH = 'release'        # git branch to check out
USED_VS_VERSION = '14.0'       # required Visual Studio version (registry key / MSBuild path)
# Maps a build target to the platform tag embedded in the generated .sln name.
TARGET_TO_SLN_TAG = {
    'win_x86': 'Win32',
    'win_x64': 'Win64'
}
def get_installed_vs_versions():
    """
    Query the registry to find installed Visual Studio versions (assumes the
    C++ tooling is installed alongside them).

    Raises OSError when the expected version is not present.
    :return: None
    """
    import winreg

    # Enumerate every subkey of the Visual Studio registry key.
    registry = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
    vs_key = winreg.OpenKey(registry, r'SOFTWARE\Microsoft\VisualStudio')

    subkeys = []
    index = 0
    while True:
        try:
            subkeys.append(winreg.EnumKey(vs_key, index))
        except OSError:
            # EnumKey raises once we run past the last subkey.
            break
        index += 1

    # Subkeys containing '.0' are taken to be version numbers (e.g. '14.0').
    available_versions = [key for key in subkeys if '.0' in key]
    if USED_VS_VERSION not in available_versions:
        raise OSError('Visual Studio version {} is not installed (available: {}).'.format(USED_VS_VERSION,
                                                                                          available_versions))
def main():
    """
    Get code from GitHub and perform an incremental build.
    Assumes that the required SDKs directory is called 'SDKs' and is directly adjacent to the repo checkout directory.
    Flow: clone-or-pull, checkout, clean, (re)create the Code\\SDKs junction,
    configure with CMake in the build directory, build with MSBuild, then
    remove the junction and clean again.
    """
    repository = USED_REPOSITORY
    branch = USED_BRANCH
    target = USED_TARGET
    config = USED_CONFIG
    # e.g. 'win_x86_profile' - kept across runs for incremental builds.
    build_dir = '_'.join([target, config.lower()])
    steps = {
        'clone': ['git', 'clone', 'https://github.com/CRYTEK-CRYENGINE/{repo}.git'.format(repo=repository)],
        'pull': ['git', '-C', repository, 'pull'],
        'checkout': ['git', 'checkout', branch],
        # Quietly remove files that aren't tracked by git but leave the build folder in place (for incremental builds).
        'clean': ['git', 'clean', '-dfq', '-e', 'Code/SDKs', '-e', build_dir],
        # For now, assume Windows for convenience.
        'configure': ['cmake', r'-DCMAKE_TOOLCHAIN_FILE=Tools\CMake\toolchain\windows\WindowsPC-MSVC.cmake', '..'],
        'build': [os.path.normpath(r'C:\Program Files (x86)\MSBuild\{}\Bin\MSBuild.exe'.format(USED_VS_VERSION)),
                  '/property:Configuration={}'.format(config),
                  'CryEngine_CMake_{}.sln'.format(TARGET_TO_SLN_TAG.get(target))]
    }
    # First checkout clones; subsequent runs just pull.
    if os.path.exists(repository):
        runstep(steps, 'pull')
    else:
        runstep(steps, 'clone')
    os.chdir(repository)
    runstep(steps, 'checkout')
    runstep(steps, 'clean')
    # Recreate the Code\SDKs junction pointing at the adjacent ..\SDKs dir.
    if os.path.exists(os.path.join('Code', 'SDKs')):
        if platform.system() == 'Windows':
            subprocess.check_call(['rmdir', r'Code\SDKs'], shell=True)
    if not os.path.exists(os.path.join('Code', 'SDKs')):
        if platform.system() == 'Windows':
            subprocess.check_call(['mklink', '/J', r'Code\SDKs', r'..\SDKs'], shell=True)
    print('Changing to build directory: {}'.format(build_dir))
    if not os.path.exists(build_dir):
        os.mkdir(build_dir)
    os.chdir(build_dir)
    runstep(steps, 'configure')
    runstep(steps, 'build')
    # Back to the repo root: drop the junction and clean up once more.
    os.chdir('..')
    if platform.system() == 'Windows':
        subprocess.check_call(['rmdir', r'Code\SDKs'], shell=True)
    runstep(steps, 'clean')
def runstep(steps, name):
    """Look up *name* in the *steps* mapping and execute its command line.

    :param steps: dictionary mapping step names to argv-style command lists.
    :param name: key of the step to execute.
    :raises subprocess.CalledProcessError: if the command exits non-zero.
    """
    command = steps[name]
    print('Running {} step with command "{}".'.format(name, ' '.join(command)))
    subprocess.check_call(command)
if __name__ == '__main__':
main()
| Python | 0 | |
6764d0286f2386bef8ab5f627d061f45047956e9 | add logger | logger.py | logger.py | #!/usr/bin/env python
import logging
import os
from termcolor import colored
class ColorLog(object):
    """Proxy around a logging.Logger that colorizes level-method output.

    Attribute access for the standard level methods returns a wrapper that
    runs the message through termcolor before delegating; everything else
    passes straight through to the wrapped logger.
    """

    colormap = {
        'debug': {'color': 'grey', 'attrs': ['bold']},
        'info': {'color': 'green'},
        'warn': {'color': 'yellow', 'attrs': ['bold']},
        'warning': {'color': 'yellow', 'attrs': ['bold']},
        'error': {'color': 'red'},
        'critical': {'color': 'red', 'attrs': ['bold']},
    }

    def __init__(self, logger):
        self._log = logger

    def __getattr__(self, name):
        if name not in self.colormap:
            return getattr(self._log, name)

        def colorized(message, *args):
            return getattr(self._log, name)(
                colored(message, **self.colormap[name]), *args)

        return colorized
# Initialize logger: console output at INFO via basicConfig, plus a DEBUG
# file handler with timestamps.
logging.basicConfig(format="%(levelname)s: %(name)s - %(message)s", level=logging.INFO)
fh = logging.FileHandler("timDIMM.log")
fh.setLevel(logging.DEBUG)
# NOTE(review): fh is configured here but never attached to any logger via
# addHandler(), so nothing is written to timDIMM.log as-is - confirm intent.
fh.setFormatter(logging.Formatter("%(asctime)s: %(levelname)s - %(name)s - %(message)s"))
| Python | 0.000026 | |
6a426523186180a345777b7af477c12473fd3aa0 | add human moderator actions to file | perspective_reddit_bot/check_mod_actions.py | perspective_reddit_bot/check_mod_actions.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A reddit bot to detect which actions subreddit moderators actually took."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime
import json
import praw
import time
from creds import creds
def write_moderator_actions(reddit,
                            line,
                            id_key,
                            timestamp_key,
                            output_path,
                            hours_to_wait):
    """Record what human moderators did with one bot-logged comment.

    Parses the JSON record in *line*, fetches the comment from reddit,
    waits until hours_to_wait hours have passed since the bot reviewed it
    (so moderators had time to react), then appends the record - augmented
    with 'approved'/'removed' flags - as one JSON line to output_path.
    """
    record = json.loads(line)
    comment = reddit.comment(record[id_key])
    maybe_wait(record[timestamp_key], hours_to_wait)
    record['approved'] = comment.approved
    record['removed'] = comment.removed
    with open(output_path, 'a') as o:
        json.dump(record, o)
        o.write('\n')
def maybe_wait(timestamp, hours_to_wait):
    """Waits until hours_to_wait hours have passed since timestamp.

    *timestamp* is a '%Y%m%d_%H%M%S'-formatted UTC string (the moment the
    bot reviewed the comment).  Returns immediately if enough time has
    already elapsed.
    """
    now = datetime.utcnow()
    # Bug fix: timedelta.seconds is only the sub-day seconds component, so a
    # timestamp more than a day old made this sleep spuriously (days were
    # dropped).  total_seconds() gives the true elapsed time.
    time_diff = (now - datetime.strptime(timestamp, '%Y%m%d_%H%M%S')).total_seconds()
    seconds_to_wait = hours_to_wait * 3600
    if time_diff < seconds_to_wait:
        time_to_wait = seconds_to_wait - time_diff
        print('Waiting %.1f seconds...' % time_to_wait)
        time.sleep(time_to_wait)
def _main():
    """Tail the moderation bot's output file and, for each logged comment,
    append the action human moderators actually took."""
    # Bug fix: two pairs of adjacent string literals below were missing a
    # separating space ('taken byhuman', 'timestampthat'), garbling the
    # program description and help text.
    parser = argparse.ArgumentParser(
        'Reads the output of moderate_subreddit.py and adds actions taken by '
        'human moderators.')
    parser.add_argument('input_path', help='json file with reddit comment ids')
    parser.add_argument('output_path', help='path to write output file')
    parser.add_argument('-id_key', help='json key containing reddit comment id',
                        default='comment_id')
    parser.add_argument('-timestamp_key', help='json key containing timestamp '
                        'that moderation bot saw comment',
                        default='bot_review_utc')
    parser.add_argument('-hours_to_wait',
                        help='the number of hours to wait to allow moderators to'
                        ' respond to bot',
                        type=int,
                        default=12)
    parser.add_argument('-stop_at_eof',
                        help='if set, stops the process once the end of file is '
                        'hit instead of waiting for new comments to be written',
                        action='store_true')
    args = parser.parse_args()
    reddit = praw.Reddit(client_id=creds['reddit_client_id'],
                         client_secret=creds['reddit_client_secret'],
                         user_agent=creds['reddit_user_agent'],
                         username=creds['reddit_username'],
                         password=creds['reddit_password'])
    with open(args.input_path) as f:
        # Loops through the file and waits at EOF for new data to be written.
        while True:
            where = f.tell()
            line = f.readline()
            if line:
                write_moderator_actions(reddit,
                                        line,
                                        args.id_key,
                                        args.timestamp_key,
                                        args.output_path,
                                        args.hours_to_wait)
            else:
                if args.stop_at_eof:
                    return
                else:
                    print('Reached EOF. Waiting for new data...')
                    time.sleep(args.hours_to_wait * 3600)
                    # Rewind so the partial/new line is re-read next pass.
                    f.seek(where)
if __name__ == '__main__':
    _main()
| Python | 0 | |
9cd3e1183b78f561751a638cf4e863703ec080d6 | add load ini file config | load_config.py | load_config.py | #!/usr/bin/env python
"""
conf file example
[elk-server]
ip = elk.server.ip
kibana = check_http
elasticsearch = check_http!-p 9200
logstash-3333 = check_tcp!3333
logstash-3334 = check_tcp!3334
load = check_nrpe!check_load
"""
import os, sys
# ConfigParser was renamed in Python 3; support both interpreters.
try:
    from ConfigParser import ConfigParser
except ImportError:
    from configparser import ConfigParser
parser = ConfigParser()
parser.read(sys.argv[1])  # path to the .ini file is the first CLI argument
parser.sections()  # NOTE(review): result discarded - this call is dead code
# One section per host: 'ip' is the host address, every other option maps a
# service name to its check command.
for section in parser.sections():
    # WARNING: section/option values are interpolated straight into a shell
    # command via os.system - only feed this trusted config files.
    os.system('./add_host.sh {} {}'.format(section, parser.get(section, 'ip')))
    parser.remove_option(section, 'ip')
    for service, command in parser.items(section):
        # '/' is escaped as '\/' - presumably the target script feeds this
        # through sed; confirm against add_service_to_host.sh.
        os.system('./add_service_to_host.sh {} {} {}'.format(section, service, command.replace('/', r'\/')))
| Python | 0.000001 | |
e559a0458d1e4b0ec578eb9bcfdcc992d439a35d | Add test cases for the backwards compatibility in #24 | tests/test_backwards.py | tests/test_backwards.py | """ Test backwards-compatible behavior """
import json
from flywheel import Field, Model
from flywheel.fields.types import TypeDefinition, DictType, STRING
from flywheel.tests import DynamoSystemTest
class JsonType(TypeDefinition):
    """Legacy flywheel type: values are stored as JSON-encoded strings."""
    data_type = json
    ddb_data_type = STRING

    def coerce(self, value, force):
        # No coercion: any JSON-serializable value is accepted as-is.
        return value

    def ddb_dump(self, value):
        # Encode the value for storage in DynamoDB.
        encoded = json.dumps(value)
        return encoded

    def ddb_load(self, value):
        # Decode the stored JSON string back into Python data.
        decoded = json.loads(value)
        return decoded
class OldDict(Model):
    """ Model that uses an old-style json field as a dict store """
    # Table name used by the flywheel engine for this model.
    __metadata__ = {
        '_name': 'dict-test',
    }
    # Primary (hash) key of the table.
    id = Field(hash_key=True)
    # Stored as a JSON string via the legacy JsonType serializer.
    data = Field(data_type=JsonType())
class TestOldJsonTypes(DynamoSystemTest):
    """ Test the graceful handling of old json-serialized data """
    models = [OldDict]
    def setUp(self):
        super(TestOldJsonTypes, self).setUp()
        # Start each test with the legacy JSON serializer installed on the field.
        OldDict.meta_.fields['data'].data_type = JsonType()
    def test_migrate_data(self):
        """ Test graceful load of old json-serialized data """
        old = OldDict('a', data={'a': 1})
        self.engine.save(old)
        # Swap in the new dict type and verify old JSON rows still load.
        OldDict.meta_.fields['data'].data_type = DictType()
        new = self.engine.scan(OldDict).one()
        self.assertEqual(new.data, old.data)
    def test_resave_old_data(self):
        """ Test the resaving of data that used to be json """
        old = OldDict('a', data={'a': 1})
        self.engine.save(old)
        OldDict.meta_.fields['data'].data_type = DictType()
        new = self.engine.scan(OldDict).one()
        new.data['b'] = 2
        # Overwrite unconditionally: the stored representation changed format.
        new.sync(raise_on_conflict=False)
        ret = self.engine.scan(OldDict).one()
        self.assertEqual(ret.data, {'a': 1, 'b': 2})
| Python | 0 | |
501c38ac9e8b9fbb35b64321e103a0dfe064e718 | Add a sequence module for optimizing gating | QGL/BasicSequences/BlankingSweeps.py | QGL/BasicSequences/BlankingSweeps.py | """
Sequences for optimizing gating timing.
"""
from ..PulsePrimitives import *
from ..Compiler import compile_to_hardware
def sweep_gateDelay(qubit, sweepPts):
    """
    Sweep the gate delay associated with a qubit channel using a simple Id, Id, X90, X90
    sequence.
    Parameters
    ---------
    qubit : logical qubit to create sequences for
    sweepPts : iterable to sweep the gate delay over.
    """
    generator = qubit.physChan.generator
    oldDelay = generator.gateDelay  # restored after the sweep
    for ct, delay in enumerate(sweepPts):
        # NOTE(review): the last two sequences are identical X90 sequences -
        # confirm the duplicate is intentional.
        seqs = [[Id(qubit, length=120e-9), Id(qubit), MEAS(qubit)],
                [Id(qubit, length=120e-9), MEAS(qubit)],
                [Id(qubit, length=120e-9), X90(qubit), MEAS(qubit)],
                [Id(qubit, length=120e-9), X90(qubit), MEAS(qubit)]]
        generator.gateDelay = delay
        # One compiled hardware file per sweep point, suffixed _1, _2, ...
        compile_to_hardware(seqs, 'BlankingSweeps/GateDelay', suffix='_{}'.format(ct+1))
    generator.gateDelay = oldDelay
| Python | 0 | |
213d1e65ebd6d2f9249d26c7ac3690d6bc6cde24 | fix encoding | manage.py | manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django manage.py entry point: point at this project's settings
    # module (without overriding one already present in the environment).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geode_geocoding.settings")
    # Imported lazily so the settings environment variable is set first.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| Python | 0.274682 | |
6107d7fe1db571367a20143fa38fc6bec3056d36 | Fix port for activity script | scripts/activity.py | scripts/activity.py | #!/usr/bin/env python
import argparse
import collections
import itertools
import os
import random
import sys
import time
from contextlib import contextmanager
import logbook
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(__file__)), ".."))
from flask_app import app
from flask_app.smtp import smtpd_context
from mailboxer import Mailboxer
# CLI: --port is the Mailboxer HTTP port; --smtp-port targets an
# already-running SMTP server instead of spawning one.
parser = argparse.ArgumentParser(usage="%(prog)s [options] args...")
parser.add_argument("--smtp-port", default=None, type=int)
parser.add_argument("--port", default=8000)
class Application(object):
    """Demo traffic generator: periodically creates mailboxes and sends mail."""
    def __init__(self, args):
        self._args = args
    def main(self):
        client = Mailboxer("http://127.0.0.1:{0}".format(self._args.port))
        # Keep only the five most recent mailboxes as send targets.
        mailboxes = collections.deque(maxlen=5)
        with self._get_smtpd_context() as smtp:
            for iteration in itertools.count():
                # Every third iteration, create a fresh (timestamped) mailbox.
                if iteration % 3 == 0:
                    logbook.info("Creating mailbox (#{})", iteration)
                    mailboxes.append("mailbox{0}@demo.com".format(time.time()))
                    client.create_mailbox(mailboxes[-1])
                logbook.info("Sending email... (#{})", iteration)
                smtp.sendmail("noreply@demo.com", [random.choice(mailboxes)], "This is message no. {0}".format(iteration))
                time.sleep(5)
        return 0
    @contextmanager
    def _get_smtpd_context(self):
        # Spawn the app's own smtpd unless an external port was given.
        if self._args.smtp_port is None:
            with smtpd_context() as result:
                yield result
        else:
            # NOTE(review): SMTP is not imported anywhere in this file, so
            # this branch raises NameError - likely missing
            # 'from smtplib import SMTP'; confirm and fix.
            yield SMTP("127.0.0.1", self._args.smtp_port)
#### For use with entry_points/console_scripts
def main_entry_point():
    args = parser.parse_args()
    app = Application(args)
    sys.exit(app.main())
if __name__ == "__main__":
    main_entry_point()
| #!/usr/bin/env python
import argparse
import collections
import itertools
import os
import random
import sys
import time
from contextlib import contextmanager
import logbook
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(__file__)), ".."))
from flask_app import app
from flask_app.smtp import smtpd_context
from mailboxer import Mailboxer
# Older copy of the activity script (default --port 8080, later fixed).
parser = argparse.ArgumentParser(usage="%(prog)s [options] args...")
parser.add_argument("--smtp-port", default=None, type=int)
parser.add_argument("--port", default=8080)
class Application(object):
    """Demo traffic generator: periodically creates mailboxes and sends mail."""
    def __init__(self, args):
        self._args = args
    def main(self):
        client = Mailboxer("http://127.0.0.1:{0}".format(self._args.port))
        # Keep only the five most recent mailboxes as send targets.
        mailboxes = collections.deque(maxlen=5)
        with self._get_smtpd_context() as smtp:
            for iteration in itertools.count():
                if iteration % 3 == 0:
                    logbook.info("Creating mailbox (#{})", iteration)
                    mailboxes.append("mailbox{0}@demo.com".format(time.time()))
                    client.create_mailbox(mailboxes[-1])
                logbook.info("Sending email... (#{})", iteration)
                smtp.sendmail("noreply@demo.com", [random.choice(mailboxes)], "This is message no. {0}".format(iteration))
                time.sleep(5)
        return 0
    @contextmanager
    def _get_smtpd_context(self):
        if self._args.smtp_port is None:
            with smtpd_context() as result:
                yield result
        else:
            # NOTE(review): SMTP is undefined in this file (NameError on
            # this branch) - likely missing 'from smtplib import SMTP'.
            yield SMTP("127.0.0.1", self._args.smtp_port)
#### For use with entry_points/console_scripts
def main_entry_point():
    args = parser.parse_args()
    app = Application(args)
    sys.exit(app.main())
if __name__ == "__main__":
    main_entry_point()
| Python | 0 |
df378f5c555f18ce48fb550ab07c85f779a31c60 | Add script to merge users with duplicate usernames | scripts/merge_duplicate_users.py | scripts/merge_duplicate_users.py | """Merge User records that have the same username. Run in order to make user collection
conform with the unique constraint on User.username.
"""
import sys
import logging
from modularodm import Q
from website.app import init_app
from website.models import User
from framework.mongo import database
from framework.transactions.context import TokuTransaction
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def find_primary_and_secondaries(users):
    """Pick the account to merge into, given users sharing one username.

    Selection rules: the single active user if exactly one is active; the
    earliest-registered user if none are active; otherwise the active user
    with the latest date_last_login.  Raises AssertionError when several
    users are active but none has a last-login date.

    Returns a tuple (primary_user, list_of_secondary_users).
    """
    actives = [user for user in users if user.is_active]
    if len(actives) == 1:
        # Exactly one active account: it wins outright.
        primary = actives[0]
    elif not actives:
        # Nobody active: fall back to the oldest registration.
        primary = min(users, key=lambda user: user.date_registered)
    else:
        candidates = [user for user in actives if user.date_last_login]
        if not candidates:
            raise AssertionError(
                'Multiple active users with no date_last_login. '
                'Perform the merge manually.'
            )
        # Several actives: the most recently seen account becomes primary.
        primary = max(candidates, key=lambda user: user.date_last_login)
    secondaries = list(users)
    secondaries.remove(primary)
    return primary, secondaries
def main(dry=True):
    """Find usernames shared by multiple User records and merge the
    duplicates into one primary account.

    When *dry* is True (the default), duplicates are only logged and no
    merge is performed.
    """
    # Group users by username and keep only usernames with more than one id.
    duplicates = database.user.aggregate([
        {
            "$group": {
                "_id": "$username",
                "ids": {"$addToSet": "$_id"},
                "count": {"$sum": 1}
            }
        },
        {
            "$match": {
                "count": {"$gt": 1},
                "_id": {"$ne": None}
            }
        },
        {
            "$sort": {
                "count": -1
            }
        }
    ]).get('result')
    # Example shape of the aggregation result:
    # [
    #     {
    #         'count': 5,
    #         '_id': 'duplicated@username.com',
    #         'ids': [
    #             'listo','fidst','hatma','tchth','euser','name!'
    #         ]
    #     }
    # ]
    logger.info('Found {} duplicate usernames.'.format(len(duplicates)))
    for duplicate in duplicates:
        logger.info(
            'Found {} copies of {}: {}'.format(
                len(duplicate.get('ids')),
                duplicate.get('_id'),
                ', '.join(duplicate['ids'])
            )
        )
        users = list(User.find(Q('_id', 'in', duplicate.get('ids'))))
        primary, secondaries = find_primary_and_secondaries(users)
        for secondary in secondaries:
            logger.info('Merging user {} into user {}'.format(secondary._id, primary._id))
            # don't just rely on the toku txn and prevent a call to merge_user
            # when doing a dry run because merge_user does more than just
            # db updates (mailchimp calls, elasticsearch, etc.)
            if not dry:
                with TokuTransaction():
                    primary.merge_user(secondary)
                    primary.save()
                    secondary.save()
    logger.info('Finished migrating {} usernames'.format(len(duplicates)))
if __name__ == "__main__":
    # Pass --dry to report without merging; real runs also log to a file.
    dry = '--dry' in sys.argv
    if not dry:
        script_utils.add_file_logger(logger, __file__)
    init_app(set_backends=True, routes=False)
    main(dry=dry)
| Python | 0.000001 | |
6c94617d8ea2b66bba6c33fdc9aa81c5161a53f8 | add yaml | marcov.py | marcov.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#twitterBot.py
import sys
# Python 2 only: force utf-8 as the default codec for Japanese text.
reload(sys)
sys.setdefaultencoding('utf-8')
#use python-twitter
import twitter
import MeCab
import random
import re
import yaml
# Twitter credentials live outside the repo in ../API.yaml.
# NOTE(review): yaml.load without an explicit Loader can execute arbitrary
# tags; the file is local, but prefer yaml.safe_load.
_var = open("../API.yaml").read()
_yaml = yaml.load(_var)
api = twitter.Api(
    consumer_key = _yaml["consumer_key0"],
    consumer_secret = _yaml["consumer_secret0"],
    access_token_key = _yaml["access_token0"],
    access_token_secret = _yaml["access_token_secret0"]
)
def wakati(text):
    """Tokenize Japanese *text* into a list of surface words via MeCab."""
    tagger = MeCab.Tagger("-Owakati")
    parsed = tagger.parse(text)
    return parsed.rstrip(" \n").split(" ")
def markov(src):
    """Build a first-order chain (word -> possible successors) from *src*
    and return a randomly generated sentence of at most 50 words."""
    wordlist = wakati(src)
    markov = {}
    w1 = ''
    for word in wordlist:
        if w1:
            if w1 not in markov:
                markov[w1] = []
            markov[w1].append(word)
        w1 = word
    count = 0
    sentence = ''
    w1 = random.choice(markov.keys())
    # The 50-word cap is a matter of taste.
    while count < 50:
        # Bug fix: when w1 had no successors the loop previously spun forever
        # (count was only incremented on a hit); stop generating instead.
        # Also replaces the Python-2-only dict.has_key() with 'in'.
        if w1 not in markov:
            break
        tmp = random.choice(markov[w1])
        sentence += tmp
        w1 = tmp
        count += 1
    return sentence
def tweet_friends():
    """Build a markov sentence from the recent tweets of all friends."""
    friends = api.GetFriends()
    tweets = ''
    for friend in friends:
        friend_timeline = api.GetUserTimeline(screen_name=friend.screen_name)
        for status in friend_timeline:
            # Exclude tweets directed at other users (replies/mentions).
            if "@" not in status.text:
                tweets += status.text
    tweets = str(tweets)
    # Strip URLs before feeding the chain.
    tweets = re.sub('https?://[\w/:%#\$&\?\(\)~\.=\+\-]+', "", tweets)
    # Bug fix: this previously called the undefined name 'marcov', raising
    # NameError at runtime; the chain builder defined above is 'markov'.
    FriendsTweet = markov(tweets)
    return FriendsTweet
def tweet_own():
    """Generate a markov sentence from this account's own recent tweets."""
    i=0
    own = api.GetUserTimeline(screen_name='geo_ebi',count=100)
    tweets=''
    for i in range(len(own)):
        # Skip replies/mentions so the chain sees only plain tweets.
        if "@" not in own[i].text:
            tweets+=own[i].text
    tweets=str(tweets)
    # Strip URLs before feeding the chain.
    tweets=re.sub('https?://[\w/:%#\$&\?\(\)~\.=\+\-]+',"",tweets)
    OwnTweet = markov(tweets)
    return OwnTweet
# Post either a self-trained or a friends-trained sentence, 50/50.
# NOTE(review): both branches are identical apart from the source function;
# could be collapsed to one call with a conditional source.
if random.random()<0.5:
    Bot = tweet_own()
    print(Bot)
    status = api.PostUpdate(Bot)
else:
    Bot = tweet_friends()
    print(Bot)
    status = api.PostUpdate(Bot)
| Python | 0.000067 | |
78ea6bf390d2a2aa787c9bb37c12cf47a13357ce | Create marvin.py | marvin.py | marvin.py | """
Original code by Charles Leifer
https://github.com/coleifer/irc/blob/master/bots/markov.py
"""
#!/usr/bin/python
import os
import pickle
import random
import re
import sys
from irc import IRCBot, IRCConnection
class MarkovBot(IRCBot):
    """
    Hacking on a markov chain bot - based on:
    http://code.activestate.com/recipes/194364-the-markov-chain-algorithm/
    http://github.com/ericflo/yourmomdotcom
    """
    messages_to_generate = 5  # candidate sentences per generation attempt
    chattiness = .01          # probability of replying when not pinged
    max_words = 15            # NOTE(review): appears unused; generate_message has its own size arg
    chain_length = 2          # words of context per chain key
    stop_word = '\n'          # sentinel appended to mark end-of-message
    filename = 'markov.db'    # pickled word table persisted across runs
    last = None               # nick last imitated, returned by 'cite'

    def __init__(self, *args, **kwargs):
        super(MarkovBot, self).__init__(*args, **kwargs)
        self.load_data()

    def load_data(self):
        """Restore the per-sender chain table from disk, if present."""
        if os.path.exists(self.filename):
            fh = open(self.filename, 'rb')
            self.word_table = pickle.loads(fh.read())
            fh.close()
        else:
            self.word_table = {}

    def save_data(self):
        """Persist the chain table to disk."""
        # Bug fix: was opened in text mode ('w') while load_data reads 'rb';
        # write in binary mode for symmetry.
        fh = open(self.filename, 'wb')
        fh.write(pickle.dumps(self.word_table))
        fh.close()

    def split_message(self, message):
        """Yield sliding windows of chain_length+1 words from *message*."""
        words = message.split()
        if len(words) > self.chain_length:
            # Pad with stop words so generation knows where messages end.
            words.extend([self.stop_word] * self.chain_length)
            for i in range(len(words) - self.chain_length):
                yield (words[i:i + self.chain_length + 1])

    def generate_message(self, person, size=15, seed_key=None):
        """Walk *person*'s chain from seed_key (or a random key) and return
        the longest of several candidate sentences, or None if the person
        has too little recorded data."""
        person_words = len(self.word_table.get(person, {}))
        if person_words < size:
            return
        if not seed_key:
            seed_key = random.choice(self.word_table[person].keys())
        message = []
        for i in xrange(self.messages_to_generate):
            words = seed_key
            gen_words = []
            for i in xrange(size):
                if words[0] == self.stop_word:
                    break
                gen_words.append(words[0])
                try:
                    words = words[1:] + (random.choice(self.word_table[person][words]),)
                except KeyError:
                    break
            if len(gen_words) > len(message):
                message = list(gen_words)
        return ' '.join(message)

    def imitate(self, sender, message, channel):
        """Command handler: 'imitate <nick>' generates a message as <nick>."""
        person = message.replace('imitate ', '').strip()[:10]
        if person != self.conn.nick:
            return self.generate_message(person)

    def cite(self, sender, message, channel):
        """Command handler: name who was last imitated."""
        if self.last:
            return self.last

    def sanitize_message(self, message):
        """Convert to lower-case and strip out all quotation marks"""
        return re.sub('[\"\']', '', message.lower())

    def log(self, sender, message, channel):
        """Record *message* in *sender*'s chain and occasionally answer with
        a message generated from another person's chain."""
        sender = sender[:10]
        self.word_table.setdefault(sender, {})
        if message.startswith('/'):
            return
        try:
            say_something = self.is_ping(message) or sender != self.conn.nick and random.random() < self.chattiness
        except AttributeError:
            say_something = False
        messages = []
        seed_key = None
        if self.is_ping(message):
            message = self.fix_ping(message)
        for words in self.split_message(self.sanitize_message(message)):
            key = tuple(words[:-1])
            # Bug fix: the membership test previously ran against the outer
            # sender->chains dict (whose keys are nicks, never word tuples),
            # so it was always False and every successor list got overwritten
            # instead of appended to.
            if key in self.word_table[sender]:
                self.word_table[sender][key].append(words[-1])
            else:
                self.word_table[sender][key] = [words[-1]]

            if self.stop_word not in key and say_something:
                for person in self.word_table:
                    if person == sender:
                        continue
                    if key in self.word_table[person]:
                        generated = self.generate_message(person, seed_key=key)
                        if generated:
                            messages.append((person, generated))
        if len(messages):
            self.last, message = random.choice(messages)
            return message

    def load_log_file(self, filename):
        """Train from an IRC log; lines are expected to look like
        '<nick> ... ] message' per the regex below - confirm against logs."""
        fh = open(filename, 'r')
        logline_re = re.compile('<\s*(\w+)>[^\]]+\]\s([^\r\n]+)[\r\n]')
        for line in fh.readlines():
            match = logline_re.search(line)
            if match:
                sender, message = match.groups()
                # Bug fix: log() takes (sender, message, channel); the old
                # five-argument call raised TypeError.
                self.log(sender, message, '')

    def load_text_file(self, filename, sender):
        """Train from a plain text file, attributing every line to *sender*."""
        fh = open(filename, 'r')
        for line in fh.readlines():
            # Bug fix: see load_log_file - log() takes three arguments.
            self.log(sender, line, '')

    def command_patterns(self):
        return (
            self.ping('^imitate \S+', self.imitate),
            self.ping('^cite', self.cite),
            ('.*', self.log),
        )
host = 'irc.yournetwork.net'
port = 6667
nick = 'Marvin'
conn = IRCConnection(host, port, nick)
markov_bot = MarkovBot(conn)
# '-log <file>' (3 args) trains from an IRC log; '-log <file> <sender>'
# trains from a plain text file attributed to <sender>; otherwise connect
# and run the bot until interrupted.
if len(sys.argv) > 1 and sys.argv[1] == '-log':
    if len(sys.argv) == 3:
        markov_bot.load_log_file(sys.argv[2])
    elif len(sys.argv):  # NOTE(review): always true here - acts as a bare else
        markov_bot.load_text_file(sys.argv[2], sys.argv[3])
else:
    conn.connect()
    conn.join('#yourchannel')
    try:
        conn.enter_event_loop()
    except:  # swallow anything (incl. Ctrl-C) so the table is still saved
        pass
# Persist whatever was learned this session.
markov_bot.save_data()
| Python | 0.000001 | |
edbb41e1f897d5e0bab5460d971ffd5917e6d1e6 | add peer task | teuthology/task/peer.py | teuthology/task/peer.py | import logging
import ceph_manager
import json
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def rados(remote, cmd):
    """Run the ceph 'rados' CLI with arguments *cmd* on *remote*, inside the
    teuthology test sandbox (/tmp/cephtest), and return its exit status.

    check_status=False means a non-zero exit is returned, not raised.
    """
    log.info("rados %s" % ' '.join(cmd))
    # Environment + coverage wrappers around the sandboxed rados binary.
    pre = [
        'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
        '/tmp/cephtest/enable-coredump',
        '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
        '/tmp/cephtest/archive/coverage',
        '/tmp/cephtest/binary/usr/local/bin/rados',
        '-c', '/tmp/cephtest/ceph.conf',
        ];
    pre.extend(cmd)
    proc = remote.run(
        args=pre,
        check_status=False
        )
    return proc.exitstatus
def task(ctx, config):
    """
    Test peering.

    Kills and revives OSDs so that some PGs end up 'down', blocked on a
    dead OSD; verifies via 'pg ... query' that the blockage is reported on
    osd.1; then revives everything and waits for a clean cluster.
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'peer task only accepts a dict for configuration'
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    manager = ceph_manager.CephManager(
        mon,
        ctx=ctx,
        logger=log.getChild('ceph_manager'),
        )
    # Wait for all three OSDs to come up, then for a clean state.
    while manager.get_osd_status()['up'] < 3:
        manager.sleep(10)
    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
    manager.wait_for_clean()
    # something that is always there
    dummyfile = '/etc/fstab'  # NOTE(review): unused in this task
    # take on osd down
    manager.kill_osd(2)
    manager.mark_down_osd(2)
    # kludge to make sure they get a map
    rados(mon, ['-p', 'data', 'get', 'dummy', '-'])
    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
    manager.wait_for_recovery()
    # kill another and revive 2, so that some pgs can't peer.
    manager.kill_osd(1)
    manager.mark_down_osd(1)
    manager.revive_osd(2)
    manager.wait_till_osd_is_up(2)
    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
    # look for down pgs
    num_down_pgs = 0
    pgs = manager.get_pg_stats()
    for pg in pgs:
        out = manager.raw_cluster_cmd('pg', pg['pgid'], 'query')
        # First line of the command output is skipped before JSON parsing.
        j = json.loads('\n'.join(out.split('\n')[1:]))
        log.info("json is %s" % j)
        assert j['state'] == pg['state']
        if pg['state'].count('down'):
            num_down_pgs += 1
            # verify that it is blocked on osd.1
            rs = j['recovery_state']
            assert len(rs) > 0
            assert rs[0]['name'] == 'Started/Primary/Peering/GetInfo'
            assert rs[1]['name'] == 'Started/Primary/Peering'
            assert rs[1]['blocked']
            assert rs[1]['down_osds_we_would_probe'] == [1]
            assert len(rs[1]['peering_blocked_by']) == 1
            assert rs[1]['peering_blocked_by'][0]['osd'] == 1
    assert num_down_pgs > 0
    # bring it all back
    manager.revive_osd(1)
    manager.wait_till_osd_is_up(1)
    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
    manager.wait_for_clean()
| Python | 0 | |
9dcc635d0d5239928415ecab7a5ddb5387f98dea | add mail.py | globe/mail.py | globe/mail.py | from flask_mail import Message
from globe import app, mail
def send_email(subject, sender, recipients, text_body, html_body):
    """Assemble a plain-text + HTML email and dispatch it via Flask-Mail.

    NOTE(review): only sender[0] is used, so *sender* appears to be a
    sequence whose first item is the from-address -- confirm with callers.
    """
    message = Message(subject, sender=sender[0], recipients=recipients)
    message.body = text_body
    message.html = html_body
    mail.send(message)
| Python | 0.000002 | |
56ad587d21abe5251be5ce5fced8e42f1d89c2f4 | Create tutorial1.py | tutorial1.py | tutorial1.py | from ggame import App
# Create the ggame application object and hand control to its event
# loop; run() does not return until the app window is closed.
myapp = App()
myapp.run()
| Python | 0 | |
83b3c5128d579cd23b4f81133175827adb8a92df | Send requests to ceilometer-api using multiple threads | ceilorunner.py | ceilorunner.py | #!/usr/bin/env python
from ceilometerclient.shell import CeilometerShell
from ceilometerclient import client as ceiloclient
from ceilometerclient.v2 import options
import threading
import argparse
import sys
import os
import time
import pdb
"""
Replace functions will call the non-printing version of functions
Add to prevent seeing ton of out put on cm shell functions
Name them as same as the shell functions
"""
def do_meter_list(client, args):
    """Quiet counterpart of the shell command: return the meters
    matching the CLI query instead of printing them."""
    query = options.cli_to_array(args.query)
    return client.meters.list(q=query)
def do_resource_list(client, args):
    """Quiet counterpart of the shell command: return the resources
    matching the CLI query instead of printing them."""
    query = options.cli_to_array(args.query)
    return client.resources.list(q=query)
def do_resource_show(client, args):
    """Quiet counterpart of the shell command: fetch a single resource
    by its id and return it."""
    resource_id = args.resource_id
    return client.resources.get(resource_id)
def do_alarm_list(client, args):
    """Quiet counterpart of the shell command: return the alarms
    matching the CLI query instead of printing them."""
    query = options.cli_to_array(args.query)
    return client.alarms.list(q=query)
def do_statistics(client, args):
    '''List the statistics for a meter.

    Returns the statistics list.  CONSISTENCY FIX: the other quiet
    do_* variants return their results; the original computed the
    statistics and silently discarded them.
    '''
    aggregates = []
    for a in args.aggregate:
        # "func<-param" -> {'func': ..., 'param': ...}
        aggregates.append(dict(zip(('func', 'param'), a.split("<-"))))
    api_args = {'meter_name': args.meter,
                'q': options.cli_to_array(args.query),
                'period': args.period,
                'groupby': args.groupby,
                'aggregates': aggregates}
    statistics = client.statistics.list(**api_args)
    return statistics
"""
Uses the env if no authentication args are given
"""
def get_ceilometer_api_client(args):
    """Parse *args* with the ceilometer shell and build an API client.

    Authentication falls back to the environment when no credential
    arguments are supplied.
    """
    shell = CeilometerShell()
    api_version, parsed_args = shell.parse_args(args)
    client = ceiloclient.get_client(api_version, **(parsed_args.__dict__))
    return parsed_args, client
def parse_args(args):
    """Split local load-test options from ceilometer CLI options.

    Returns (namespace, leftover) where *leftover* is forwarded to the
    ceilometer shell untouched.
    """
    arg_parser = argparse.ArgumentParser(
        prog='ceilometer',
    )
    arg_parser.add_argument('--num-threads', type=int, default=1)
    arg_parser.add_argument('--num-iterations', type=int, default=1)
    #arg_parser.add_argument('--input-file', default="~/.inputceilorunner")
    # BUG FIX: parse the caller-supplied argument list instead of
    # implicitly re-reading sys.argv (the *args* parameter was ignored).
    return arg_parser.parse_known_args(args)
def enable_output(self=None):
    """Restore stdout/stderr to the interpreter's original streams.

    The unused *self* parameter looks like a leftover from a method;
    it is kept (now with a default) for backward compatibility.
    """
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__
def disable_output(self=None):
    """Silence stdout/stderr by redirecting them to the null device.

    BUG FIX: the original assigned the *path string* os.devnull to
    sys.stdout/sys.stderr, so the next print would raise
    AttributeError; the device is now actually opened for writing.
    The unused *self* parameter is kept (with a default) for
    backward compatibility.
    """
    devnull = open(os.devnull, 'w')
    sys.stdout = devnull
    sys.stderr = devnull
class CeiloCommandThread(threading.Thread):
    """Thread that runs one ceilometer command repeatedly, recording
    per-call wall-clock timings and any error encountered."""

    def __init__(self, num_iterations, client, func, args):
        threading.Thread.__init__(self, name=func.__name__)
        self.num_iterations = num_iterations
        self.client = client
        # Prefer the quiet module-level variant of the shell function
        # when one exists, to avoid flooding stdout.
        self.func = self.get_function_to_call(func)
        self.args = args
        self.run_times = []
        self.error_flag = False
        self.error_str = ''

    @property
    def avg(self):
        """Mean runtime per call (0.0 before any call completed)."""
        return sum(self.run_times) / len(self.run_times) if self.run_times else 0.0

    @property
    def sum(self):
        """Total runtime across all recorded calls."""
        return sum(self.run_times) if self.run_times else 0.0

    @property
    def min(self):
        """Fastest recorded call (0.0 before any call completed)."""
        return min(self.run_times) if self.run_times else 0.0

    @property
    def max(self):
        """Slowest recorded call (0.0 before any call completed)."""
        return max(self.run_times) if self.run_times else 0.0

    @property
    def error(self):
        """Error description, or '' when the run succeeded."""
        return self.error_str if self.error_flag else ''

    def print_stats(self):
        # PORTABILITY FIX: print(...) with a single argument behaves
        # identically under Python 2's print statement and Python 3.
        print("Function %s took %f sec (avg=%f, min=%f, max=%f) for %d iterations (%s)"
              % (self.name, self.sum, self.avg, self.min, self.max,
                 self.num_iterations, self.error))

    def get_function_to_call(self, shell_func):
        """Return the module-level override for *shell_func* if one is
        defined, otherwise the shell function itself."""
        replace_function = globals().get(shell_func.__name__, None)
        return replace_function if replace_function else shell_func

    def run(self):
        """Invoke the command num_iterations times, timing each call."""
        try:
            self.run_times = []
            for _ in range(self.num_iterations):
                t0 = time.time()
                try:
                    self.func(self.client, self.args)
                finally:
                    # Record the elapsed time even when the call raised.
                    self.run_times.append(time.time() - t0)
        except Exception as e:
            self.error_flag = True
            self.error_str = "error occured in thread %s" % str(e)
def main(args=None):
try:
if args is None:
args = sys.argv[1:]
local_args, ceilo_args = parse_args(args)
ceilo_client_args, client = get_ceilometer_api_client(ceilo_args)
threads = []
for _ in range(local_args.num_threads):
t = CeiloCommandThread(local_args.num_iterations,
client, ceilo_client_args.func, ceilo_client_args)
threads.append(t)
t.start()
total_runtimes = []
for i, t in enumerate(threads):
t.join()
#t.print_stats()
if t.error_flag:
t.print_stats()
total_runtimes = total_runtimes + t.run_times
gt_60 = sum(1 for t in threads if t.max > 60.0)
print "Threads that are greater than 60 secs %d", gt_60
print "num iter / thread, numthreads, ave, min, max = %d\t%d\t%f\t%f\t%f" % \
(local_args.num_iterations, len(threads),
sum(total_runtimes)/len(total_runtimes), min(total_runtimes), max(total_runtimes))
except Exception as e:
print "CeiloRunner: Unknown error ", str(e)
if __name__ == "__main__":
main()
| Python | 0 | |
ef8ad297634d2153d5a1675d7bb60b963f8c6abd | Add wrapper | cfn_wrapper.py | cfn_wrapper.py | # MIT Licensed, Copyright (c) 2015 Ryan Scott Brown <sb@ryansb.com>
import json
import logging
import urllib2
logger = logging.getLogger()
logger.setLevel(logging.INFO)
"""
Event example
{
"Status": SUCCESS | FAILED,
"Reason: mandatory on failure
"PhysicalResourceId": string,
"StackId": event["StackId"],
"RequestId": event["RequestId"],
"LogicalResourceId": event["LogicalResourceId"],
"Data": {}
}
"""
def wrap_user_handler(func, base_response=None):
    """Wrap *func* so its outcome is reported back to CloudFormation.

    The wrapper assembles the response skeleton from the incoming event,
    overlays *base_response* and the handler's return value, then PUTs
    the JSON document to the stack's pre-signed ResponseURL.
    """
    def wrapped(event, context):
        response = {
            "StackId": event["StackId"],
            "RequestId": event["RequestId"],
            "LogicalResourceId": event["LogicalResourceId"],
            "Status": "SUCCESS",
        }
        if base_response is not None:
            response.update(base_response)

        logger.debug("Received %s request with event: %s" % (event['RequestType'], json.dumps(event)))

        try:
            response.update(func(event, context))
        except:
            # Report handler failures as FAILED instead of letting the
            # exception escape the Lambda invocation.
            logger.exception("Failed to execute resource function")
            response.update({
                "Status": "FAILED",
                "Reason": "Exception was raised while handling custom resource"
            })

        serialized = json.dumps(response)
        logger.info("Responding to '%s' request with: %s" % (
            event['RequestType'], serialized))

        headers = {'Content-Length': len(serialized),
                   'Content-Type': ''}
        req = urllib2.Request(event['ResponseURL'], data=serialized,
                              headers=headers)
        # urllib2 has no native PUT support; override the verb.
        req.get_method = lambda: 'PUT'

        try:
            urllib2.urlopen(req)
            logger.debug("Request to CFN API succeeded, nothing to do here")
        except urllib2.HTTPError as e:
            logger.error("Callback to CFN API failed with status %d" % e.code)
            logger.error("Response: %s" % e.reason)
        except urllib2.URLError as e:
            logger.error("Failed to reach the server - %s" % e.reason)

    return wrapped
class Resource(object):
    """Dispatcher for CloudFormation custom-resource lifecycle events.

    Handlers registered through the create/update/delete decorators are
    wrapped so their results are reported back to CloudFormation;
    request types with no registered handler succeed with a mock
    resource id.
    """

    _dispatch = None

    def __init__(self):
        self._dispatch = {}

    def __call__(self, event, context):
        request = event['RequestType']
        logger.debug("Received {} type event. Full parameters: {}".format(request, json.dumps(event)))
        handler = self._dispatch.get(request, self._succeed)
        return handler(event, context)

    def _succeed(self, event, context):
        """Default no-op handler: report success without touching anything."""
        return {
            'Status': 'SUCCESS',
            'PhysicalResourceId': event.get('PhysicalResourceId', 'mock-resource-id'),
            'Reason': 'Life is good, man',
            'Data': {},
        }

    def create(self, wraps):
        """Register *wraps* as the Create handler."""
        self._dispatch['Create'] = wrap_user_handler(wraps)
        return wraps

    def update(self, wraps):
        """Register *wraps* as the Update handler."""
        self._dispatch['Update'] = wrap_user_handler(wraps)
        return wraps

    def delete(self, wraps):
        """Register *wraps* as the Delete handler."""
        self._dispatch['Delete'] = wrap_user_handler(wraps)
        return wraps
| Python | 0.000004 | |
e62a705d464df21098123ada89d38c3e3fe8ca73 | Define a channel interface | zerorpc/channel_base.py | zerorpc/channel_base.py | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2014 François-Xavier Bourlet (bombela@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class ChannelBase(object):
    """Abstract base describing the zerorpc channel interface.

    Subclasses supply the context, the capability flags and the
    new_event/emit_event/recv primitives; emit() is provided here as a
    convenience built on top of them.
    """

    @property
    def context(self):
        raise NotImplementedError()

    @property
    def recv_is_supported(self):
        raise NotImplementedError()

    @property
    def emit_is_supported(self):
        raise NotImplementedError()

    def close(self):
        raise NotImplementedError()

    def new_event(self, name, args, xheader=None):
        raise NotImplementedError()

    def emit_event(self, event, timeout=None):
        raise NotImplementedError()

    def emit(self, name, args, xheader=None, timeout=None):
        """Build an event from the arguments and send it."""
        return self.emit_event(self.new_event(name, args, xheader), timeout)

    def recv(self, timeout=None):
        raise NotImplementedError()
| Python | 0.015718 | |
c19120e0123b76236d11f3523e2ebd64c00b9feb | Check import | homeassistant/components/thermostat/radiotherm.py | homeassistant/components/thermostat/radiotherm.py | """
homeassistant.components.thermostat.radiotherm
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Adds support for Radio Thermostat wifi-enabled home thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/thermostat.radiotherm.html
"""
import logging
import datetime
from urllib.error import URLError
from homeassistant.components.thermostat import (ThermostatDevice, STATE_COOL,
STATE_IDLE, STATE_HEAT)
from homeassistant.const import (CONF_HOST, TEMP_FAHRENHEIT)
REQUIREMENTS = ['radiotherm==1.2']
HOLD_TEMP = 'hold_temp'
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """ Sets up the Radio Thermostat. """
    try:
        import radiotherm
    except ImportError:
        _LOGGER.exception(
            "Unable to import radiotherm. "
            "Did you maybe not install the 'radiotherm' package?")
        return False

    hosts = []
    if CONF_HOST in config:
        hosts = config[CONF_HOST]
    else:
        # BUG FIX: discover_address() can return None; the original
        # appended it to the list and then tested "hosts is None",
        # which could never be true because hosts is always a list.
        address = radiotherm.discover.discover_address()
        if address is not None:
            hosts.append(address)

    if not hosts:
        _LOGGER.error("No radiotherm thermostats detected")
        return

    hold_temp = config.get(HOLD_TEMP, False)

    tstats = []

    for host in hosts:
        try:
            tstat = radiotherm.get_thermostat(host)
            tstats.append(RadioThermostat(tstat, hold_temp))
        except (URLError, OSError):
            _LOGGER.exception("Unable to connect to Radio Thermostat: %s",
                              host)

    add_devices(tstats)
class RadioThermostat(ThermostatDevice):
    """ Represent a Radio Thermostat wifi device. """

    def __init__(self, device, hold_temp):
        self.device = device
        self.hold_temp = hold_temp
        self.set_time()
        self._name = None
        self._operation = STATE_IDLE
        self._current_temperature = None
        self._target_temperature = None
        self.update()

    @property
    def name(self):
        """ The thermostat's configured name. """
        return self._name

    @property
    def unit_of_measurement(self):
        """ Temperatures are reported in Fahrenheit. """
        return TEMP_FAHRENHEIT

    @property
    def device_state_attributes(self):
        """ Extra attributes: current fan and operating mode. """
        return {"fan": self.device.fmode['human'],
                "mode": self.device.tmode['human']}

    @property
    def current_temperature(self):
        """ Last temperature reading, rounded to one decimal. """
        return round(self._current_temperature, 1)

    @property
    def operation(self):
        """ Current operation: heat, cool or idle. """
        return self._operation

    @property
    def target_temperature(self):
        """ Setpoint we are driving towards, rounded to one decimal. """
        return round(self._target_temperature, 1)

    def update(self):
        """ Refresh the cached state from the device. """
        self._current_temperature = self.device.temp['raw']
        self._name = self.device.name['raw']
        mode = self.device.tmode['human']
        if mode == 'Cool':
            self._operation = STATE_COOL
            self._target_temperature = self.device.t_cool['raw']
        elif mode == 'Heat':
            self._operation = STATE_HEAT
            self._target_temperature = self.device.t_heat['raw']
        else:
            self._operation = STATE_IDLE

    def set_temperature(self, temperature):
        """ Push a new target temperature to the device. """
        if self._operation == STATE_COOL:
            self.device.t_cool = temperature
        elif self._operation == STATE_HEAT:
            self.device.t_heat = temperature
        self.device.hold = 1 if self.hold_temp else 0

    def set_time(self):
        """ Sync the thermostat clock with the local time. """
        now = datetime.datetime.now()
        self.device.time = {'day': now.weekday(),
                            'hour': now.hour,
                            'minute': now.minute}
| """
homeassistant.components.thermostat.radiotherm
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Adds support for Radio Thermostat wifi-enabled home thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/thermostat.radiotherm.html
"""
import logging
import datetime
from urllib.error import URLError
from homeassistant.components.thermostat import (ThermostatDevice, STATE_COOL,
STATE_IDLE, STATE_HEAT)
from homeassistant.const import (CONF_HOST, TEMP_FAHRENHEIT)
REQUIREMENTS = ['radiotherm==1.2']
HOLD_TEMP = 'hold_temp'
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """ Sets up the Radio Thermostat. """
    import radiotherm

    hosts = []
    if CONF_HOST in config:
        hosts = config[CONF_HOST]
    else:
        # BUG FIX: discover_address() can return None; the original
        # appended it to the list and then tested "hosts is None",
        # which could never be true because hosts is always a list.
        address = radiotherm.discover.discover_address()
        if address is not None:
            hosts.append(address)

    if not hosts:
        _LOGGER.error("No radiotherm thermostats detected")
        return

    hold_temp = config.get(HOLD_TEMP, False)

    tstats = []

    for host in hosts:
        try:
            tstat = radiotherm.get_thermostat(host)
            tstats.append(RadioThermostat(tstat, hold_temp))
        except (URLError, OSError):
            _LOGGER.exception("Unable to connect to Radio Thermostat: %s",
                              host)

    add_devices(tstats)
class RadioThermostat(ThermostatDevice):
    """ Represent a Radio Thermostat wifi device. """

    def __init__(self, device, hold_temp):
        self.device = device
        self.hold_temp = hold_temp
        self.set_time()
        self._name = None
        self._operation = STATE_IDLE
        self._current_temperature = None
        self._target_temperature = None
        self.update()

    @property
    def name(self):
        """ The thermostat's configured name. """
        return self._name

    @property
    def unit_of_measurement(self):
        """ Temperatures are reported in Fahrenheit. """
        return TEMP_FAHRENHEIT

    @property
    def device_state_attributes(self):
        """ Extra attributes: current fan and operating mode. """
        return {"fan": self.device.fmode['human'],
                "mode": self.device.tmode['human']}

    @property
    def current_temperature(self):
        """ Last temperature reading, rounded to one decimal. """
        return round(self._current_temperature, 1)

    @property
    def operation(self):
        """ Current operation: heat, cool or idle. """
        return self._operation

    @property
    def target_temperature(self):
        """ Setpoint we are driving towards, rounded to one decimal. """
        return round(self._target_temperature, 1)

    def update(self):
        """ Refresh the cached state from the device. """
        self._current_temperature = self.device.temp['raw']
        self._name = self.device.name['raw']
        mode = self.device.tmode['human']
        if mode == 'Cool':
            self._operation = STATE_COOL
            self._target_temperature = self.device.t_cool['raw']
        elif mode == 'Heat':
            self._operation = STATE_HEAT
            self._target_temperature = self.device.t_heat['raw']
        else:
            self._operation = STATE_IDLE

    def set_temperature(self, temperature):
        """ Push a new target temperature to the device. """
        if self._operation == STATE_COOL:
            self.device.t_cool = temperature
        elif self._operation == STATE_HEAT:
            self.device.t_heat = temperature
        self.device.hold = 1 if self.hold_temp else 0

    def set_time(self):
        """ Sync the thermostat clock with the local time. """
        now = datetime.datetime.now()
        self.device.time = {'day': now.weekday(),
                            'hour': now.hour,
                            'minute': now.minute}
| Python | 0 |
10f7e5c8c1a2cdc84f706ccad041755b83c4953b | Create htmlsearch.py | htmlsearch.py | htmlsearch.py | import glob
# List every HTML file in the working directory and echo the names.
arr = glob.glob("*.html")
print(arr)

# Read each file's full contents.  BUG FIX: the original left every
# file object open; the handle is now closed even if read() fails.
k = []
for name in arr:
    f = open(name, "r")
    try:
        k.append(f.read())
    finally:
        f.close()

print(k)

'''
Outputs:

print print glob.glob("*.html")
['source.html', 'so.html']

print k
['google.com', 'socorop.com']
'''
| Python | 0.00002 | |
46eb1c2d10316eae4d85b3d689307e32ed763d07 | add 6-17.py | chapter6/6-17.py | chapter6/6-17.py | #!/usr/bin/env python
def myPop(myList):
    """Remove and return the top (last) element of the stack.

    Exits the interpreter when the stack is empty, matching the
    original behaviour.
    """
    if not myList:
        # print(...) with a single argument works under Python 2 and 3.
        print("no more element to pop")
        exit(1)
    # BUG FIX: list.remove() deletes the *first* equal element, which
    # corrupts the stack whenever it contains duplicates; delete by
    # index instead.
    result = myList[-1]
    del myList[-1]
    return result
def myPush(myList, element):
    """Place *element* on top (at the end) of the stack."""
    myList += [element]
def main():
    """Push a few values onto the list-backed stack, then pop them all,
    echoing the stack after every operation."""
    stack = []
    for value in range(800, 810, 3):
        myPush(stack, value)
        print("myList push %s" % value)
        print("myList = %s" % stack)
    print("myList = %s" % stack)
    for _ in range(4):
        print("myList pop %s " % myPop(stack))
        print("myList = %s" % stack)
if __name__ == '__main__':
main()
| Python | 0.998825 | |
7faff0ae9ea4b8d72b42d1af992bb4c72cc745ff | test program to immediately connect and disconnect | test/client_immediate_disconnect.py | test/client_immediate_disconnect.py | #!/usr/bin/env python
import socket

host = socket.gethostname() # Get local machine name
port = 55555                # Reserve a port for your service.

# Connect, send a single byte, then disconnect immediately.
s = socket.socket()
s.connect((host, port))
s.send("x")
# BUG FIX: the original said "s.close" without parentheses, which only
# references the bound method and never actually closes the socket.
s.close()
| Python | 0 | |
5b6667de8b91232facec27bc11305513bb2ec3b3 | add demo tests for parameterization | test_parameters.py | test_parameters.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import time
from selenium import webdriver
browser = webdriver.Firefox()
email_addresses = ["invalid_email", "another_invalid_email@", "not_another_invalid_email@blah"]
passwords = ["weak_password", "generic_password", "shitty_password"]
@pytest.mark.parametrize("email", email_addresses)
@pytest.mark.parametrize("password", passwords)
def test_assert_login_button_enabled(email, password):
    """Fill the sign-in form with every invalid email/password pair.

    NOTE(review): despite its name, nothing here asserts that the login
    button is enabled -- the test can only fail if Selenium cannot find
    the fields.  Confirm the intended check and add an assertion.
    """
    # Fixed sleep gives the page time to render; an explicit wait on
    # the form elements would be more reliable.
    browser.get("https://start.engagespark.com/sign-in/")
    time.sleep(3)
    browser.find_element_by_name("login").click()
    browser.find_element_by_name("login").send_keys(email)
    browser.find_element_by_name("password").click()
    browser.find_element_by_name("password").send_keys(password)
@pytest.mark.parametrize("field_name, maxlength", [
    ("login", "75"),
    ("password", "128"),
])
def test_assert_field_maxlength(field_name, maxlength):
    """Each form field must advertise the expected maxlength attribute."""
    browser.get("https://start.engagespark.com/sign-in/")
    time.sleep(3)
    # BUG FIX: the comparison result was silently discarded, so the
    # test could never fail; it now actually asserts the value.
    assert browser.find_element_by_name(field_name).get_attribute("maxlength") == maxlength
@pytest.mark.parametrize("email", [
    "123@abc.org",
    pytest.mark.xfail("blah"),
])
def test_assert_valid_email_entry(email):
    """Typing an address into the login field must leave an '@' in its value."""
    browser.get("https://start.engagespark.com/sign-in/")
    time.sleep(3)
    login_field = browser.find_element_by_name("login")
    login_field.click()
    login_field.send_keys(email)
    assert "@" in login_field.get_attribute("value")
| Python | 0 | |
fe0acf649a8db08c0bafd00e76557e9b6020bc5a | Add example for spliting 2D variable from NetCDF | Scripts/netCDF_splitter2var_2D.py | Scripts/netCDF_splitter2var_2D.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from netCDF4 import Dataset
import numpy as np
import pylab as pl
import calendar
# add extra's for copied function...
import os, sys, argparse
import datetime
"""
Split off 2D variable from file with other variables
Notes
----
- based on software carpentary example.
http://damienirving.github.io/capstone-oceanography/03-data-provenance.html
"""
#
# --- verbose and debug settings for main
VERBOSE=False
DEBUG=False
def main( filename=None, VarName='OLSON', verbose=False, debug=False ):
    """Run the program

    Reads the netCDF file named on the command line and writes
    '<input>.out' containing the coordinate variables plus *VarName*,
    with dimensions and global attributes copied from the input.

    Parameters
    ----------
    filename : str, optional
        Defaults to the input directory's parent name; only used in the
        debug print below -- presumably a label, confirm before relying
        on it.
    VarName : str
        Name of the 2D variable to split off (default 'OLSON').
    verbose, debug : bool
        Diagnostic flags; only *debug* has an effect in this function.
    """
    # Get the file name and location (read from sys.argv by the helper)
    wd, fn = get_file_loc_and_name()
    # name output file if name not given
    if isinstance( filename, type(None) ):
        filename = wd.split('/')[-2]
    if debug:
        print wd, fn, filename
    inFile = wd+'/'+fn
    # Set output name
    outfile_name = inFile+'.out'
    # Read input data
    VarData, input_DATA = read_data(inFile, VarName=VarName)
    # Set values?  (clamping experiment kept for reference, disabled)
#    print type(VarData)
#    print [ (i.shape, i.mean(), i.min(), i.max()) for i in VarData]
#    VarData[VarData>1] = 1
#    print [ (i.shape, i.mean(), i.min(), i.max()) for i in VarData]
    # --- Write the output file
    outfile = Dataset(outfile_name, 'w', format='NETCDF4')
    set_global_atts(input_DATA, outfile)
    copy_dimensions(input_DATA, outfile)
    copy_variables(input_DATA, outfile, VarName=VarName)
    # overwite data
    outfile[VarName][:] = VarData
    # Close file
    outfile.close()
def get_file_loc_and_name():
    """Read the working directory and file name from the command line.

    Returns
    -------
    tuple
        (directory, filename) taken from argv[1] and argv[2].
    """
    import sys
    return sys.argv[1], sys.argv[2]
def copy_dimensions(infile, outfile):
    """Mirror every dimension of *infile* onto *outfile*."""
    for dim_name, dim in infile.dimensions.items():
        outfile.createDimension(dim_name, len(dim))
def copy_variables(infile, outfile, VarName='OLSON'):
    """
    Create variables corresponding to the file dimensions
    by copying from infile (coordinate variables plus *VarName*).
    """
    # Coordinate variables first, then the requested data variable.
    var_list = ['lon', 'lat', 'time', VarName]
    for var_name in var_list:
        varin = infile.variables[var_name]
        outVar = outfile.createVariable(var_name, varin.datatype,
                                        varin.dimensions,
                                        )
        outVar[:] = varin[:]
        # IDIOM/SECURITY FIX: read attributes with getattr() instead of
        # eval() on a constructed string; _FillValue cannot be set after
        # variable creation, so it is skipped as before.
        var_atts = {att: getattr(varin, att)
                    for att in varin.ncattrs()
                    if att != '_FillValue'}
        outVar.setncatts(var_atts)
def read_data(ifile, VarName='OLSON'):
    """
    Open *ifile* and return (array for *VarName*, open Dataset handle).
    """
    dataset = Dataset(ifile)
    var_data = dataset.variables[VarName][:]
    return var_data, dataset
def set_global_atts(infile, outfile):
    """Set the global attributes for outfile.

    Note that the global attributes are simply copied from infile.
    """
    # IDIOM/SECURITY FIX: read attributes with getattr() instead of
    # eval() on a constructed string.
    global_atts = {att: getattr(infile, att) for att in infile.ncattrs()}
    # set attributes
    outfile.setncatts(global_atts)
if __name__ == "__main__":
main( verbose=VERBOSE, debug=DEBUG )
| Python | 0 | |
4aecc9be1e2e8074a20606e65db3f9e6283eb8d3 | add utils | uhura/exchange/utils.py | uhura/exchange/utils.py | """
Utilities and helper functions
"""
def get_object_or_none(model, **kwargs):
    """Look up a single *model* instance by the given filter kwargs.

    Returns None instead of raising when the lookup throws
    model.DoesNotExist.
    """
    try:
        return model.objects.get(**kwargs)
    except model.DoesNotExist:
return None | Python | 0.000004 | |
071aa9f5465847fdda517d1a78c37f1dbfe69f9f | test mock | tests/mock_bank.py | tests/mock_bank.py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import os.path
# Make the project root importable so the 'src' package resolves when
# this file is run from inside the tests/ directory.
sys.path.append(os.path.join(os.path.dirname(__file__),'..'))

from src.bank import Bank
from mock import MagicMock

# Placeholder fixture: instantiates the real Bank.  MagicMock is
# imported but unused -- presumably mocking is still to come; confirm
# the intended test coverage.
thing = Bank()
| Python | 0.000002 | |
f3182c9651509d2e1009040601c23a78ed3e9b7c | Create laynger.py | laynger.py | laynger.py | #import sublime
import sublime_plugin
class laynger(sublime_plugin.TextCommand):
    """Sublime Text command that nudges the vertical divider of a
    two-column layout: 'center' snaps it to the middle, 'right' moves
    it right by one percent, anything else moves it left."""

    def run(self, edit, opt='center'):
        window = self.view.window()
        layout = window.get_layout()
        cols = layout['cols']
        # Only act on layouts with at most two columns.
        if len(cols) > 3:
            return
        if opt == u'center':
            cols[1] = 0.5
        elif opt == u'right':
            cols[1] += 0.01
        else:
            cols[1] -= 0.01
        window.run_command('set_layout', layout)
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.