commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
6f7fd163106ec5f4346eaaef04ed9726a3289801 | add wrong reversesubstring problem solution | problems/reversesubstring.py | problems/reversesubstring.py | import sys
test = "aabbbbababaaabbab"
"""
Find a) the first occurrence of b in string
b) the longest list of only as in string, store final index
"""
def solution(string):
    """Return (index of the first 'b', end index of the longest 'a'-run after it).

    Returns (0, 0) when the string contains no 'b' at all.  Prints the
    input/diagnostics exactly as the original implementation did.
    """
    firstB = string.find('b')
    print((string, firstB))
    if firstB == -1:
        return (0, 0)
    best_len = 0
    best_end = 0
    run_len = 0
    for pos in range(firstB, len(string)):
        ch = string[pos]
        if ch == 'a':
            print("found a", str(pos))
            run_len += 1
            if run_len > best_len:
                best_len = run_len
                best_end = pos
        if ch == 'b':
            run_len = 0
    return (firstB, best_end)
if __name__ == '__main__':
if (len(sys.argv) > 1):
print(solution(sys.argv[1]))
else:
print(solution(test))
| Python | 0.995257 | |
9ff1b6ffa297199dc73042382c369fc7af0813fc | Create stress_test1.py | home/moz4r/Test/stress_test1.py | home/moz4r/Test/stress_test1.py | # stress test
from time import sleep
import random

# MyRobotLab/InMoov stress test: exercises the left-hand thumb servo and the
# speech synthesizer on a Clock-driven loop.  `Runtime` and `python` are
# injected by the MyRobotLab Jython environment, not imported here.
leftPort = "COM3"
i01 = Runtime.createAndStart("i01", "InMoov")
sleep(1)
i01.startMouth()
i01.startHead(leftPort)
i01.startLeftHand(leftPort)
# Constrain jaw travel (input 0-180 mapped to 85-110) so speech animation
# stays within a safe mechanical range.
i01.head.jaw.map(0,180,85,110)
i01.startMouthControl(leftPort)
i01.leftHand.thumb.setVelocity(random.randint(100,300))
MoveRandomTimer = Runtime.start("MoveRandomTimer","Clock")
def MoveRandom(timedata):
    # Clock "pulse" callback: wiggle the thumb, re-randomize the next
    # interval (~10-11s), and speak so audio + servo load run together.
    i01.leftHand.thumb.moveTo(random.randint(50,130))
    MoveRandomTimer.setInterval(random.randint(10000,11000))
    i01.mouth.speak("voice test voice test")
MoveRandomTimer.addListener("pulse", python.name, "MoveRandom")
MoveRandomTimer.startClock()
| Python | 0.000033 | |
a183922bd275414259800e75fd78db980604fa20 | create thread3 | threading/thread3_join.py | threading/thread3_join.py | import threading
import time
def thread_job():
    # Worker for thread T1: simulates ~1 second of work (10 x 0.1s sleeps)
    # so that the join() ordering in main() is observable.
    print('T1 start\n')
    for i in range(10):
        time.sleep(0.1)
    print('T1 finish\n')
def T2_job():
    """Worker for thread T2: emits start/finish markers and exits immediately."""
    for marker in ('T2 start\n', 'T2 finish\n'):
        print(marker)
def main():
    """Start threads T1 and T2, wait for both, then print 'all done'."""
    added_thread = threading.Thread(target=thread_job, name='T1')
    thread2 = threading.Thread(target=T2_job, name='T2')
    added_thread.start()
    thread2.start()
    # join() blocks the caller; waiting on both threads guarantees
    # 'all done' is printed only after T1 and T2 have completed.
    thread2.join()
    added_thread.join()
    print('all done\n')
if __name__ == '__main__':
main() | Python | 0 | |
143dbdb6d0d9840c4991eadbb2f5459398a6ddae | Add a 'cache' which only caches ETOPO1 files. | joerd/store/cache.py | joerd/store/cache.py | from joerd.mkdir_p import mkdir_p
from joerd.plugin import plugin
from os import link
import os.path
class CacheStore(object):
    """
    Every tile that gets generated requires ETOPO1. Rather than re-download
    it every time (it's 446MB), we cache that file only.
    This is a bit of a hack, and would be better replaced by a generic
    fixed-size LRU/LFU cache. Even better if the cache could be shared
    between multiple Joerd processes on the same host.
    """
    def __init__(self, cfg):
        # NOTE(review): `store_type` is not defined anywhere in this scope --
        # as written this raises NameError.  Presumably it should come from
        # cfg (e.g. cfg['store']['type']); confirm against the plugin config.
        create_fn = plugin('store', store_type, 'create')
        self.store = create_fn('store', cfg['store'])
        self.cache_dir = cfg['cache_dir']

    def upload_all(self, d):
        # Pass-through: uploads are never cached, only ETOPO1 downloads are.
        self.store.upload_all(d)

    @contextmanager
    def upload_dir(self):
        # NOTE(review): neither `contextmanager` nor `tmpdir` is imported in
        # this file -- both raise NameError as written; they presumably come
        # from contextlib and a joerd temp-dir helper.  TODO confirm.
        with tmpdir() as t:
            yield t
            self.upload_all(t)

    def exists(self, filename):
        # Delegates existence checks to the wrapped store (cache not checked).
        return self.store.exists(filename)

    def get(self, source, dest):
        # Only ETOPO1 sources are cached; everything else is fetched directly.
        if 'ETOPO1' in source:
            cache_path = os.path.join(self.cache_dir, source)
            if not os.path.exists(cache_path):
                mkdir_p(os.path.dirname(cache_path))
                self.store.get(source, cache_path)
            # hard link to dest. this makes it non-portable, but means that
            # we don't have to worry about whether GDAL supports symbolic
            # links, and we don't have to worry about deleting files, as they
            # are reference counted by the OS.
            link(cache_path, dest)
        else:
            self.store.get(source, dest)
def create(cfg):
    """Plugin entry point: construct a CacheStore from the given config."""
    cache_store = CacheStore(cfg)
    return cache_store
| Python | 0 | |
679ae2966f44a071630934c7b7d9eeb550a59223 | Create balance_array.py | balance_array.py | balance_array.py | '''
`Balance Array`
Find i in array A where: A[1] + A[2]...A[i-1] = A[i+1] + A[i+2]...A[len(A)]
Write a `balanceSum` function which take an integer array as input,
it should return the smallest i, where i is an index in the array such that
the sum of elements to its left is equal to the sum of elements to its right.
Note: There always exist a solution.
'''
'''
TODO: use `pytest` or the likes to run tests more easily.
'''
def balanceSum(A):
    """Return the smallest i in [1, len(A)) whose prefix/suffix sums match,
    or None when no such index exists.

    Note the asymmetric slices (A[:i-1] vs A[i:]), preserved from the
    original: index i-1 is excluded from both sides while A[i] starts the
    right-hand side.
    """
    n = len(A)
    i = 1
    while i < n:
        if sum(A[:i - 1]) == sum(A[i:]):
            return i
        i += 1
    return None
def balanceSum2(A):
    # currently is wrong
    # Attempted O(n) variant using running prefix/suffix sums.
    # NOTE(review): known-broken, as labelled above.  After `i += 1`,
    # `right_sum -= A[i]` indexes one past the current element and raises
    # IndexError on the final iteration (i == len(A)) whenever no balance
    # point was found earlier.  Also uses a Python 2 print statement.
    left_sum, right_sum = 0, sum(A)
    for i, value in enumerate(A):
        i += 1  # shift to 1-based indexing to match balanceSum's contract
        if left_sum == right_sum:
            return i
        left_sum += A[i-1]
        right_sum -= A[i]
        print i, left_sum, right_sum
    return None
def test_one(func):
inp = [4,1,2,3,3]
out = 3
if out != func(inp):
return False
return True
def test_two(func):
inp = [3,1,2,1]
out = 2
if out != func(inp):
return False
return True
def test_three(func):
inp = [3,1,3,1]
out = 2
if out != func(inp):
return False
return True
def main():
    # Run the three fixture checks against the chosen implementation and
    # print True/False per fixture (Python 2 print statements).
    # Point test_func at balanceSum2 to exercise the broken variant.
    test_func = balanceSum
    print test_one(test_func)
    print test_two(test_func)
    print test_three(test_func)
if __name__ == '__main__':
import sys
sys.exit(int(main() or 0))
| Python | 0.000521 | |
19dd8b925b188bc09eb85952db1f9f11db4c570e | add batch pics | batch_cut_pic.py | batch_cut_pic.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#function: 剪切更改图片尺寸大小
import os
import os.path
import sys, getopt, argparse
from PIL import Image
from change_pic_size_by_cut import CutImage
def main():
    """CLI entry point: cut every image in --fromdir to WxH into --outdir."""
    argc = len(sys.argv)    # NOTE(review): argc and cmdargs are unused
    cmdargs = str(sys.argv)
    parser = argparse.ArgumentParser(description="Tool for batch cut the image")
    parser.add_argument('-f', '--fromdir', required=True, help='the directory path of the input file')
    parser.add_argument('-H', '--height',type=int, required=True, help='height of the output file')
    parser.add_argument('-W', '--width',type=int, required=True, help='width of the output file')
    parser.add_argument('-d', '--outdir', required=True, help='the directory of the output file')
    parser.add_argument('-T', '--type', required=False, help='the type of the output file: jpeg, git, png ,etc')
    args = parser.parse_args()
    fromdir = args.fromdir
    outdir = args.outdir
    width = args.width
    height = args.height
    # Output format defaults to PNG when -T/--type is omitted.
    # NOTE(review): the local name `type` shadows the builtin here.
    if args.type == None:
        type = 'png'
    else:
        type = args.type
    for file in os.listdir(fromdir):
        # Skip the Windows folder-settings file.
        if file == "desktop.ini":
            continue
        filein = os.path.join(fromdir, file)
        fileout = os.path.join(outdir, file)
        try:
            CutImage(filein, fileout, width, height, type)
        except Exception as e:
            # Best-effort batch: report the failure and move on to the
            # next file rather than aborting the whole run.
            print(e)
            continue
if __name__ == '__main__':
main() | Python | 0 | |
41933fa83138f3572b899839a721b95b877d09e6 | Sample code for create customer payment profile | CustomerProfiles/create-customer-payment-profile.py | CustomerProfiles/create-customer-payment-profile.py | from authorizenet import apicontractsv1
from authorizenet.apicontrollers import *
# Build merchant authentication from API credentials.
# NOTE(review): credentials are hard-coded sample values; never commit real
# API login IDs / transaction keys -- load them from config or environment.
merchantAuth = apicontractsv1.merchantAuthenticationType()
merchantAuth.name = '5KP3u95bQpv'
merchantAuth.transactionKey = '4Ktq966gC55GAX7S'

# Card details for the new payment profile (Visa sandbox test number).
creditCard = apicontractsv1.creditCardType()
creditCard.cardNumber = "4111111111111111"
creditCard.expirationDate = "2020-12"
payment = apicontractsv1.paymentType()
payment.creditCard = creditCard
profile = apicontractsv1.customerPaymentProfileType()
profile.payment = payment

# Attach the payment profile to an existing customer profile by id.
createCustomerPaymentProfile = apicontractsv1.createCustomerPaymentProfileRequest()
createCustomerPaymentProfile.merchantAuthentication = merchantAuth
createCustomerPaymentProfile.paymentProfile = profile
createCustomerPaymentProfile.customerProfileId = '36731856'

# NOTE(review): this rebinds the imported controller *class* name to the
# instance, making the class unusable afterwards in this module.  Harmless
# in a one-shot sample script, but rename one of them if this is reused.
createCustomerPaymentProfileController = createCustomerPaymentProfileController(createCustomerPaymentProfile)
createCustomerPaymentProfileController.execute()

response = createCustomerPaymentProfileController.getresponse()
# Python 2 print statements below.
if (response.messages.resultCode=="Ok"):
    print "Successfully created a customer payment profile with id: %s" % response.customerPaymentProfileId
else:
    print "Failed to create customer payment profile %s" % response.messages.message[0].text
| Python | 0.996976 | |
a4c71bcefa255e3f2ec4fcbcd77e614669190250 | Set up change set delta lambda | cd/lambdas/change-set-delta-notification/lambda_function.py | cd/lambdas/change-set-delta-notification/lambda_function.py | # Invoked by: CodePipeline
# Returns: Error or status message
#
# Published messages about deltas between a CloudFormation stack change set and
# the current version of the stack. The stack parameter values for both the
# stack and change set are queried, compared, and the differences are sent as a
# message to the Slack relay.
#
# This should always callback to the CodePipeline API to indicate success or
# failure.
import boto3
import traceback
import json
import re
import os
code_pipeline = boto3.client('codepipeline')
cloudformation = boto3.client('cloudformation')
sns = boto3.client('sns')
def put_job_success(job, message):
    """Mark the CodePipeline job as succeeded, logging `message` first."""
    for line in ('Putting job success', message):
        print(line)
    code_pipeline.put_job_success_result(jobId=job['id'])
def put_job_failure(job, message):
    """Mark the CodePipeline job as failed, attaching `message` as details."""
    for line in ('Putting job failure', message):
        print(line)
    details = {'message': message, 'type': 'JobFailed'}
    code_pipeline.put_job_failure_result(jobId=job['id'], failureDetails=details)
def parameters_delta_attachment(user_parameters):
    """Build a Slack attachment describing parameter changes between the
    current CloudFormation stack and a pending change set.

    user_parameters must contain 'StackName' and 'ChangeSetName'.
    """
    stack_name = user_parameters['StackName']
    change_set_name = user_parameters['ChangeSetName']
    # Get current stack parameter values
    stack = cloudformation.describe_stacks(StackName=stack_name)['Stacks'][0]
    stack_parameters = stack['Parameters']
    # Get new parameter values from change set
    change_set = cloudformation.describe_change_set(
        ChangeSetName=change_set_name,
        StackName=stack_name
    )
    change_set_parameters = change_set['Parameters']
    # Combine parameters from stack and change set into
    # {key: {'StackValue': ..., 'ChangeSetValue': ...}}; a missing side
    # means the parameter was added or removed.
    parameters = {}
    for p in stack_parameters:
        if not p['ParameterKey'] in parameters:
            parameters[p['ParameterKey']] = {}
        parameters[p['ParameterKey']]['StackValue'] = p['ParameterValue']
    for p in change_set_parameters:
        if not p['ParameterKey'] in parameters:
            parameters[p['ParameterKey']] = {}
        parameters[p['ParameterKey']]['ChangeSetValue'] = p['ParameterValue']
    # Find values that have changed, and build strings that will be included in
    # the Slack message describing the changes
    deltas = []
    for k, v in parameters.items():
        if k == 'PipelineExecutionNonce':
            # Nonce changes on every execution; not a meaningful delta.
            continue
        elif 'StackValue' not in v:
            # Parameter added by the change set.
            deltas.append(f"*{k}*: ❔ ➡ `{v['ChangeSetValue']}`")
        elif 'ChangeSetValue' not in v:
            # Parameter removed by the change set.
            deltas.append(f"*{k}*: `{v['StackValue']}` ➡ ❌")
        elif v['StackValue'] != v['ChangeSetValue']:
            before = v['StackValue']
            after = v['ChangeSetValue']
            # Image-tag / commit parameters get a GitHub compare link.
            if re.search(r'EcrImageTag', k) or re.search(r'GitCommit', k):
                base = 'https://github.com/PRX'
                slug = k.replace('EcrImageTag', '').replace('GitCommit', '')
                repo = f'{slug}.prx.org'.replace('Infrastructure.prx.org', 'Infrastructure')
                url = f'{base}/{repo}/compare/{before}...{after}'
                deltas.append(f"*{k}*: `{before}` ➡ `<{url}|{after}>`")
            else:
                deltas.append(f"*{k}*: `{before}` ➡ `{after}`")
    unchanged_count = len(parameters) - len(deltas)
    return {
        'title': 'Stack Parameters Delta',
        'footer': f'Excludes {unchanged_count} unchanged parameters',
        'mrkdwn_in': ['text'],
        'text': '\n'.join(deltas)
    }
def slack_message(notification):
    """Build the Slack relay payload for the parameter-delta notification."""
    attachment = parameters_delta_attachment(notification)
    message = {
        'channel': '#ops-deploys',
        'username': 'AWS CodePipeline',
        'icon_emoji': ':ops-codepipeline:',
    }
    message['attachments'] = [attachment]
    return message
def sns_message(notification):
    """Serialize the Slack message to the JSON string SNS expects."""
    payload = slack_message(notification)
    return json.dumps(payload)
def lambda_handler(event, context):
    """CodePipeline-invoked entry point.

    Publishes the stack/change-set parameter delta to the Slack relay SNS
    topic, then reports success or failure back to CodePipeline.

    Returns a status string on success; on any exception inside the try
    block the job is marked failed via put_job_failure.
    """
    # Extract the job *before* the try block: previously `job` was assigned
    # inside the try, so if that lookup failed the except clause hit a
    # NameError on `job`, masking the original error and leaving the
    # pipeline job unreported.
    job = event['CodePipeline.job']
    try:
        print('Posting delta notification...')
        cfg = job['data']['actionConfiguration']['configuration']
        user_parameters = json.loads(cfg['UserParameters'])
        sns.publish(
            TopicArn=os.environ['SLACK_MESSAGE_RELAY_TOPIC_ARN'],
            Message=sns_message(user_parameters)
        )
        # Cleanup: tell CodePipeline the action succeeded.
        put_job_success(job, '')
        return '...Done'
    except Exception as e:
        print('Function failed due to exception.')
        print(e)
        traceback.print_exc()
        put_job_failure(job, 'Function exception: ' + str(e))
| Python | 0.000001 | |
f36a0d1d53b4a15d8ead51a54260946f293a8718 | add mac free memory script | mac_free.py | mac_free.py | #!/usr/bin/python
'''
Created on Jun 1, 2014
@author: jay
'''
import subprocess
import re

# Get process info
# `ps -caxm -orss,comm`: resident set size (in kB) and command per process.
ps = subprocess.Popen(['ps', '-caxm', '-orss,comm'], stdout=subprocess.PIPE).communicate()[0]
vm = subprocess.Popen(['vm_stat'], stdout=subprocess.PIPE).communicate()[0]

# Iterate processes
processLines = ps.split('\n')
sep = re.compile('[\s]+')
rssTotal = 0 # kB
for row in range(1,len(processLines)):
    rowText = processLines[row].strip()
    rowElements = sep.split(rowText)
    try:
        # ps reports RSS in kB; multiply by 1024 to get bytes.
        rss = float(rowElements[0]) * 1024
    except:
        rss = 0 # ignore...
    rssTotal += rss

# Process vm_stat
vmLines = vm.split('\n')
sep = re.compile(':[\s]+')
vmStats = {}
for row in range(1,len(vmLines)-2):
    rowText = vmLines[row].strip()
    rowElements = sep.split(rowText)
    # vm_stat counts 4096-byte pages; each value ends with '.' which is
    # stripped before conversion.
    vmStats[(rowElements[0])] = int(rowElements[1].strip('\.')) * 4096

# Python 2 print statements (this script targets the macOS system Python).
print 'Wired Memory:\t\t%d MB' % ( vmStats["Pages wired down"]/1024/1024 )
print 'Active Memory:\t\t%d MB' % ( vmStats["Pages active"]/1024/1024 )
print 'Inactive Memory:\t%d MB' % ( vmStats["Pages inactive"]/1024/1024 )
print 'Free Memory:\t\t%d MB' % ( vmStats["Pages free"]/1024/1024 )
print 'Real Mem Total (ps):\t%.3f MB' % ( rssTotal/1024/1024 )
27921a107ef0e6a88755ae897998cab0095cf039 | Create mafengwo.py | mafengwo.py | mafengwo.py | # -*- coding: utf-8 -*-
import urllib
import urllib2
import re
from platform import python_version
class HtmlTools:
    """Crude HTML-to-text helpers used when scraping pages."""
    # Patterns stripped outright: whitespace chars and <a>/<img> tags.
    BngCharToNoneRex = re.compile(r'(\t|\n| |<a.*?>|<img.*?>)')
    # Any remaining tag is dropped entirely.
    EndCharToNoneRex = re.compile(r'<.*?>')
    BngPartRex = re.compile(r'<p.*?>')
    CharToNewLineRex = re.compile(r'(<br/>|</p>|)<tr>|</?div>')  # unused here
    CharToNextTabRex = re.compile(r'<td>')
    ReplaceTable = [
        ('&', '\"'),
    ]

    def ReplaceChar(self, x):
        """Strip tags/whitespace from `x`, then apply the literal ReplaceTable.

        Note: EndCharToNoneRex already removes every tag, so the later
        BngPartRex/CharToNextTabRex substitutions find nothing to rewrite
        (behavior preserved from the original ordering).
        """
        for pattern, repl in ((self.BngCharToNoneRex, ''),
                              (self.EndCharToNoneRex, ''),
                              (self.BngPartRex, '\n'),
                              (self.CharToNextTabRex, '\t')):
            x = pattern.sub(repl, x)
        for old, new in self.ReplaceTable:
            x = x.replace(old, new)
        return x
class MaFengWo:
    """Scraper for a mafengwo.cn user page: fetches the page and extracts
    user info, titles, destinations and pagination data (Python 2)."""
    def __init__(self,urlPara="http://www.mafengwo.cn/u/347077.html",pagePara=1,):
        # NOTE(review): urlPara is accepted but ignored -- self.url is
        # always the hard-coded default.  TODO confirm intent.
        self.page = pagePara
        self.pages = []
        self.url = "http://www.mafengwo.cn/u/347077.html"
        self.HtmlTool = HtmlTools()
    # Extract the digits from a string
    def GetNums(self,uniString):
        # The argument is a unicode string (Python 2)
        NumString = filter(unicode.isdigit,uniString)
        return NumString
    # Fetch the page; returns raw bytes or None on failure
    def GetPages(self):
        myUrl = self.url
        myUrlReq = urllib2.Request(myUrl)
        myUrlReq.add_header('User-Agent','Mozilla/4.0')
        try:
            myResp = urllib2.urlopen(myUrlReq)
            myPage = myResp.read()
            myResp.close()
            print 'Connect Success!'
            return myPage
        except:
            print 'Fail to connect %s' % self.url
            return None
    # Extract post titles (contents of <h2> tags)
    def GetTitles(self,page):
        unicodePage = page.decode('utf-8')
        reObj = re.compile(r'<h2>(.*?)</h2>')
        titles = reObj.findall(unicodePage)
        return titles
    # Extract travel destinations linked from the posts
    def GetSenicSpot(self,page):
        unicodePage = page.decode('utf-8')
        # NOTE(review): `s` below is built but never used.
        s = r'<a.*href="/travel-scenic-spot/.*>(.*?)</a></span>'
        reObj = re.compile(r'travel-scenic-spot(.*?)</a></span>',re.S)
        senicSpots = reObj.findall(unicodePage)
        return senicSpots
    # Extract the user name
    def GetUser(self,page):
        unicodePage = page.decode('utf-8')
        reObj = re.compile(r'<li.*?class="name">(.*?)</li>')
        user = reObj.findall(unicodePage)
        return user
    def GetCity(self,page):
        # Extract the user's city
        unicodePage = page.decode('utf-8')
        reObj = re.compile(r'<li.*?class="city">(.*?)</li>',re.S)
        city = reObj.findall(unicodePage)
        return city
    def GetGender(self,page):
        # Gender is inferred from the first match: markup containing 'girl'
        # means female, anything else male.  Returns None if no match.
        unicodePage = page.decode('utf-8')
        reObj = re.compile(r'<li.*?class="gender">(.*?)</li>',re.S)
        genders = reObj.findall(unicodePage)
        for gender in genders:
            if gender.find('girl')>1:
                gender = 'female'
                return gender
            else:
                gender = 'male'
                return gender
    def GetPageNumRaw(self,page):
        # Return the raw inner HTML of the pagination <div>, or None.
        unicodePage = page.decode('utf-8')
        reObj = re.compile(r'<div.*?class="turn_page">(.*?)</div>',re.S)
        pageNumRaws = reObj.findall(unicodePage)
        for pageNumRaw in pageNumRaws:
            return pageNumRaw
    def GetCurrentPageNum(self,pageNumRaw):
        # Return the digits of the current-page marker, or None.
        reObj = re.compile(r'this-page(.*?)</span>',re.S)
        currentPageNums = reObj.findall(pageNumRaw)
        for currentPageNum in currentPageNums:
            currentPageNum = self.GetNums(currentPageNum)
            return currentPageNum
    def GetTotalPageNum(self,pageNumRaw):
        # Total pages = number of <a> links in pageNumRaw plus 1
        # (the current page has no link).
        reObj = re.compile(r'<a.*?href.*?>(.*?)</a')
        totalPageNums = reObj.findall(pageNumRaw)
        totalPageNum = totalPageNums.__len__()
        totalPageNum+=1
        return totalPageNum
    def GetPageUrl(self,pageNumRaw):
        # Placeholder -- not implemented yet.
        print 'nothing'
        return 'nothing'
    def startMaFengWo(self):
        # Fetch the page once and print every extracted field section.
        print 'This is startMaFengWo()========='
        page = self.GetPages()
        pageRaw = self.GetPageNumRaw(page)
        print '=============current page==========='
        currentPageNum = self.GetCurrentPageNum(pageRaw)
        print '%s' % currentPageNum
        print'==============number of total page===='
        totalPageNum = self.GetTotalPageNum(pageRaw)
        print '%s' % totalPageNum
        print '==============the user==============='
        users = self.GetUser(page)
        for user in users:
            user = self.HtmlTool.ReplaceChar(user)
            print '%s' % user
        print '=============the gender============='
        gender = self.GetGender(page)
        print '%s' % gender
        print '=============the city================'
        cities = self.GetCity(page)
        for city in cities:
            city = self.HtmlTool.ReplaceChar(city)
            print '%s' % city
        print '==============the title==============='
        titles = self.GetTitles(page)
        for title in titles:
            title = self.HtmlTool.ReplaceChar(title)
            print '%s' % title
        print '==============senic spots=============='
        senicSpots = self.GetSenicSpot(page)
        print 'the length of senicSpots is %d' % senicSpots.__len__()
        for senicSpot in senicSpots:
            senicSpot = self.HtmlTool.ReplaceChar(senicSpot)
            print '%s' % senicSpot
print '%s' % senicSpot
__author__ = 'WangZhenXuan'
if __name__ == '__main__':
maFengWo = MaFengWo()
maFengWo.startMaFengWo()
| Python | 0.000003 | |
d9be75200af8c63a4457b6fb6ee107f4e8aa1048 | Create medium_BinaryConverter.py | medium_BinaryConverter.py | medium_BinaryConverter.py | """
Convert from binary string to
integer
"""
def BinaryConverter(bits):
    """Convert a binary string such as '1011' to its integer value.

    Args:
        bits: a string of '0'/'1' characters (base-2 digits).

    Returns:
        The integer value the binary string represents.

    Raises:
        ValueError: if `bits` is empty or contains non-binary characters.
    """
    # int() with an explicit base does the parsing and validation for us.
    # The parameter was renamed from `str`, which shadowed the builtin;
    # the existing positional call site is unaffected.
    return int(bits, 2)
print BinaryConverter(raw_input())
| Python | 0.000001 | |
73cfd55b6db4e8623ff7c5f8d0df7433e694f8c4 | Split dottable-dict logic into separate class. | metadatastore/document.py | metadatastore/document.py | import six
import mongoengine
from mongoengine.base.datastructures import BaseDict, BaseList
from mongoengine.base.document import BaseDocument
from bson.objectid import ObjectId
from datetime import datetime
from itertools import chain
from collections import MutableMapping
def _normalize(in_val):
    """
    Helper function for cleaning up the mongoegine documents to be safe.
    Converts Mongoengine.Document to mds.Document objects recursively
    Converts:
    - mongoengine.base.datastructures.BaseDict -> dict
    - mongoengine.base.datastructures.BaseList -> list
    - ObjectID -> str
    Parameters
    ----------
    in_val : object
        Object to be sanitized
    Returns
    -------
    ret : object
        The 'sanitized' object
    """
    # Order matters: BaseDocument is checked before the container types so
    # embedded documents become Document instances rather than plain dicts.
    if isinstance(in_val, BaseDocument):
        return Document(in_val)
    elif isinstance(in_val, BaseDict):
        # Recurse on both keys and values.
        return {_normalize(k): _normalize(v) for k, v in six.iteritems(in_val)}
    elif isinstance(in_val, BaseList):
        return [_normalize(v) for v in in_val]
    elif isinstance(in_val, ObjectId):
        return str(in_val)
    # Anything else is passed through unchanged.
    return in_val
class DottableMutableMapping(MutableMapping):
    """A mapping whose items are also attributes: d.key == d['key'].

    Names starting with an underscore are treated as private attributes and
    never appear as mapping keys; all other attribute names are tracked in
    the `_fields` set that backs the mapping interface.
    """

    def __init__(self):
        self._fields = set()

    # --- attribute protocol: public attributes double as mapping entries ---

    def __setattr__(self, name, value):
        self.__dict__[name] = value
        if not name.startswith('_'):
            self._fields.add(name)
            assert hasattr(self, name)
            assert name in self.__dict__

    def __delattr__(self, name):
        del self.__dict__[name]
        if not name.startswith('_'):
            self._fields.remove(name)
            assert name not in self._fields

    # --- MutableMapping protocol, delegating to the attribute protocol ---

    def __setitem__(self, key, val):
        setattr(self, key, val)

    def __getitem__(self, key):
        try:
            return getattr(self, key)
        except AttributeError:
            raise KeyError(key)

    def __delitem__(self, key):
        delattr(self, key)

    def __iter__(self):
        return iter(self._fields)

    def __len__(self):
        return len(self._fields)

    def __contains__(self, key):
        return key in self._fields
class Document(DottableMutableMapping):
    """
    Copy the data out of a mongoengine.Document, including nested Documents,
    but do not copy any of the mongo-specific methods or attributes.
    """
    def __init__(self, mongo_document):
        """
        Parameters
        ----------
        mongo_document : mongoengine.Document
        """
        super(Document, self).__init__()
        # _name is kept only for repr(); its leading underscore keeps it out
        # of the mapping interface inherited from DottableMutableMapping.
        self._name = mongo_document.__class__.__name__
        # Union of schema-declared fields and raw data keys, so dynamic
        # fields are copied as well as declared ones.
        fields = set(chain(mongo_document._fields.keys(),
                           mongo_document._data.keys()))
        for field in fields:
            attr = getattr(mongo_document, field)
            attr = _normalize(attr)
            setattr(self, field, attr)
        # For debugging, add a human-friendly time_as_datetime attribute.
        if hasattr(self, 'time'):
            self.time_as_datetime = datetime.fromtimestamp(self.time)

    def __repr__(self):
        return "<{0} Document>".format(self._name)
| import six
import mongoengine
from mongoengine.base.datastructures import BaseDict, BaseList
from mongoengine.base.document import BaseDocument
from bson.objectid import ObjectId
from datetime import datetime
from itertools import chain
from collections import MutableMapping
def _normalize(in_val):
"""
Helper function for cleaning up the mongoegine documents to be safe.
Converts Mongoengine.Document to mds.Document objects recursively
Converts:
- mongoengine.base.datastructures.BaseDict -> dict
- mongoengine.base.datastructures.BaseList -> list
- ObjectID -> str
Parameters
----------
in_val : object
Object to be sanitized
Returns
-------
ret : object
The 'sanitized' object
"""
if isinstance(in_val, BaseDocument):
return Document(in_val)
elif isinstance(in_val, BaseDict):
return {_normalize(k): _normalize(v) for k, v in six.iteritems(in_val)}
elif isinstance(in_val, BaseList):
return [_normalize(v) for v in in_val]
elif isinstance(in_val, ObjectId):
return str(in_val)
return in_val
class Document(MutableMapping):
"""
Copy the data out of a mongoengine.Document, including nested Documents,
but do not copy any of the mongo-specific methods or attributes.
"""
def __init__(self, mongo_document):
"""
Parameters
----------
mongo_document : mongoengine.Document
"""
self._fields = set()
self._name = mongo_document.__class__.__name__
fields = set(chain(mongo_document._fields.keys(),
mongo_document._data.keys()))
for field in fields:
attr = getattr(mongo_document, field)
attr = _normalize(attr)
setattr(self, field, attr)
# For debugging, add a human-friendly time_as_datetime attribute.
if hasattr(self, 'time'):
self.time_as_datetime = datetime.fromtimestamp(self.time)
def __setattr__(self, k, v):
self.__dict__[k] = v
if not k.startswith('_'):
self._fields.add(k)
assert hasattr(self, k)
assert k in self.__dict__
def __delattr__(self, k):
del self.__dict__[k]
if not k.startswith('_'):
self._fields.remove(k)
assert k not in self._fields
def __repr__(self):
return "<{0} Document>".format(self._name)
def __iter__(self):
return iter(self._fields)
def __getitem__(self, key):
try:
return getattr(self, key)
except AttributeError:
raise KeyError(key)
def __delitem__(self, key):
delattr(self, key)
assert key not in self._fields
def __setitem__(self, key, val):
setattr(self, key, val)
def __len__(self):
return len(self._fields)
def __contains__(self, key):
return key in self._fields
| Python | 0 |
b9034ca499ae8c0366ac8cd5ee71641f39c0ffba | Add taxonomy model and initiation | website/project/taxonomies/__init__.py | website/project/taxonomies/__init__.py | import json
import os
from website import settings
from modularodm import fields, Q
from modularodm.exceptions import NoResultsFound
from framework.mongo import (
ObjectId,
StoredObject,
utils as mongo_utils
)
@mongo_utils.unique_on(['id', '_id'])
class Subject(StoredObject):
    # One taxonomy subject node; `parent` links to another Subject,
    # forming a tree per taxonomy `type`.
    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    type = fields.StringField(required=True)    # taxonomy source, e.g. 'plos'
    text = fields.StringField(required=True)    # display text of the subject
    parent = fields.ForeignField('subject', index=True)
def ensure_taxonomies():
    """Load plos_taxonomy.json and upsert one Subject per taxonomy path."""
    with open(
        os.path.join(
            settings.APP_PATH,
            'website', 'static', 'plos_taxonomy.json'
        )
    ) as fp:
        taxonomy = json.load(fp)

    # For now, only PLOS taxonomies are loaded, other types possibly considered in the future
    type = 'plos'
    for subject_path in taxonomy.get('data'):
        # Paths are underscore-delimited, e.g. 'a_b_c': the last segment is
        # this subject's text and the one before it is its parent's text.
        subjects = subject_path.split('_')
        text = subjects[-1]
        parent = None
        if len(subjects) > 1:
            parent = subjects[-2]
        try:
            subject = Subject.find_one(
                Q('text', 'eq', text) &
                Q('type', 'eq', type)
            )
        except NoResultsFound:
            # NOTE(review): `parent` here is the parent's *text* string, but
            # Subject.parent is a ForeignField -- confirm modularodm accepts
            # a text value, or whether a Subject object/id is required.
            subject = Subject(
                type = type,
                text = text,
                parent = parent
            )
        else:
            subject.type = type
            subject.text = text
            subject.parent = parent
        subject.save()
e747714e16250f3c2e85d09520f36953b1c417c3 | Create HeapSort.py | Algorithms/Sort_Algorithms/Heap_Sort/HeapSort.py | Algorithms/Sort_Algorithms/Heap_Sort/HeapSort.py | # Python program for implementation of heap Sort
# To heapify subtree rooted at index i.
# n is size of heap
def heapify(arr, n, i):
    """Sift arr[i] down so the subtree rooted at i obeys the max-heap property.

    `n` is the effective heap size; indices >= n are ignored.
    """
    left = 2 * i + 1
    right = 2 * i + 2
    # Pick the largest among the root and its existing children.
    largest = i
    if left < n and arr[left] > arr[largest]:
        largest = left
    if right < n and arr[right] > arr[largest]:
        largest = right
    # If a child was larger, swap it up and continue sifting down.
    if largest != i:
        arr[i], arr[largest] = arr[largest], arr[i]
        heapify(arr, n, largest)


def heapSort(arr):
    """Sort `arr` in place in ascending order using heapsort; returns None."""
    n = len(arr)
    # Build a max-heap bottom-up (index n is a harmless no-op, kept as-is).
    for root in range(n, -1, -1):
        heapify(arr, n, root)
    # Repeatedly move the current max to the end, then restore the heap on
    # the shrinking prefix.
    for end in range(n - 1, 0, -1):
        arr[end], arr[0] = arr[0], arr[end]
        heapify(arr, end, 0)
# Driver code to test above
arr = [ 12, 11, 13, 5, 6, 7]
heapSort(arr)
n = len(arr)
print ("Sorted array is")
for i in range(n):
print ("%d" %arr[i]),
| Python | 0.000001 | |
5c0730d7caef6503e3f97849d9df6825c289e9a0 | Fix check for valid emoji. | zerver/views/reactions.py | zerver/views/reactions.py | from __future__ import absolute_import
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from typing import Text
from zerver.decorator import authenticated_json_post_view,\
has_request_variables, REQ, to_non_negative_int
from zerver.lib.actions import do_add_reaction, do_remove_reaction
from zerver.lib.bugdown import emoji_list
from zerver.lib.message import access_message
from zerver.lib.request import JsonableError
from zerver.lib.response import json_success
from zerver.models import Reaction, Realm, UserProfile
def check_valid_emoji(realm, emoji_name):
    # type: (Realm, Text) -> None
    """Raise JsonableError unless emoji_name is a realm or built-in emoji."""
    known = (emoji_name in set(realm.get_emoji().keys())
             or emoji_name in emoji_list)
    if not known:
        raise JsonableError(_("Emoji '%s' does not exist" % (emoji_name,)))
@has_request_variables
def add_reaction_backend(request, user_profile, message_id, emoji_name):
    # type: (HttpRequest, UserProfile, int, Text) -> HttpResponse
    """Add user_profile's `emoji_name` reaction to message `message_id`.

    Raises JsonableError if the message is inaccessible, the emoji is
    unknown, or the identical reaction already exists.
    """
    # access_message will throw a JsonableError exception if the user
    # cannot see the message (e.g. for messages to private streams).
    message = access_message(user_profile, message_id)[0]
    # Validate against the emoji set of the message *sender's* realm.
    check_valid_emoji(message.sender.realm, emoji_name)
    # We could probably just make this check be a try/except for the
    # IntegrityError from it already existing, but this is a bit cleaner.
    if Reaction.objects.filter(user_profile=user_profile,
                               message=message,
                               emoji_name=emoji_name).exists():
        raise JsonableError(_("Reaction already exists"))
    do_add_reaction(user_profile, message, emoji_name)
    return json_success()
@has_request_variables
def remove_reaction_backend(request, user_profile, message_id, emoji_name):
    # type: (HttpRequest, UserProfile, int, Text) -> HttpResponse
    """Remove user_profile's `emoji_name` reaction from `message_id`.

    Raises JsonableError if the message is inaccessible, the emoji is
    unknown, or no such reaction exists.
    """
    # access_message will throw a JsonableError exception if the user
    # cannot see the message (e.g. for messages to private streams).
    message = access_message(user_profile, message_id)[0]
    # Validate against the emoji set of the message *sender's* realm.
    check_valid_emoji(message.sender.realm, emoji_name)
    # We could probably just make this check be a try/except for the
    # IntegrityError from it already existing, but this is a bit cleaner.
    if not Reaction.objects.filter(user_profile=user_profile,
                                   message=message,
                                   emoji_name=emoji_name).exists():
        raise JsonableError(_("Reaction does not exist"))
    do_remove_reaction(user_profile, message, emoji_name)
    return json_success()
| from __future__ import absolute_import
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from typing import Text
from zerver.decorator import authenticated_json_post_view,\
has_request_variables, REQ, to_non_negative_int
from zerver.lib.actions import do_add_reaction, do_remove_reaction
from zerver.lib.bugdown import emoji_list
from zerver.lib.message import access_message
from zerver.lib.request import JsonableError
from zerver.lib.response import json_success
from zerver.models import Reaction, UserProfile
@has_request_variables
def add_reaction_backend(request, user_profile, message_id, emoji_name):
# type: (HttpRequest, UserProfile, int, Text) -> HttpResponse
# access_message will throw a JsonableError exception if the user
# cannot see the message (e.g. for messages to private streams).
message = access_message(user_profile, message_id)[0]
existing_emojis = set(message.sender.realm.get_emoji().keys()) or set(emoji_list)
if emoji_name not in existing_emojis:
raise JsonableError(_("Emoji '%s' does not exist" % (emoji_name,)))
# We could probably just make this check be a try/except for the
# IntegrityError from it already existing, but this is a bit cleaner.
if Reaction.objects.filter(user_profile=user_profile,
message=message,
emoji_name=emoji_name).exists():
raise JsonableError(_("Reaction already exists"))
do_add_reaction(user_profile, message, emoji_name)
return json_success()
@has_request_variables
def remove_reaction_backend(request, user_profile, message_id, emoji_name):
# type: (HttpRequest, UserProfile, int, Text) -> HttpResponse
# access_message will throw a JsonableError exception if the user
# cannot see the message (e.g. for messages to private streams).
message = access_message(user_profile, message_id)[0]
existing_emojis = set(message.sender.realm.get_emoji().keys()) or set(emoji_list)
if emoji_name not in existing_emojis:
raise JsonableError(_("Emoji '%s' does not exist" % (emoji_name,)))
# We could probably just make this check be a try/except for the
# IntegrityError from it already existing, but this is a bit cleaner.
if not Reaction.objects.filter(user_profile=user_profile,
message=message,
emoji_name=emoji_name).exists():
raise JsonableError(_("Reaction does not exist"))
do_remove_reaction(user_profile, message, emoji_name)
return json_success()
| Python | 0.000062 |
122f24b24f16ab9ece5707919255371002929e8d | ADD RegisterTensorService | apps/domain/src/main/core/services/tensor_service.py | apps/domain/src/main/core/services/tensor_service.py | # stdlib
import secrets
from typing import List
from typing import Type
from typing import Union
# third party
from nacl.signing import VerifyKey
# syft relative
from syft.core.node.abstract.node import AbstractNode
from syft.core.node.common.service.auth import service_auth
from syft.core.node.common.service.node_service import ImmediateNodeServiceWithReply
from syft.core.node.common.service.node_service import ImmediateNodeServiceWithoutReply
from syft.decorators.syft_decorator_impl import syft_decorator
from syft.core.common.message import ImmediateSyftMessageWithReply
from syft.grid.messages.tensor_messages import (
CreateTensorMessage,
CreateTensorResponse,
GetTensorMessage,
GetTensorResponse,
UpdateTensorMessage,
UpdateTensorResponse,
DeleteTensorMessage,
DeleteTensorResponse,
GetTensorsMessage,
GetTensorsResponse,
)
@syft_decorator(typechecking=True)
def create_tensor_msg(
    msg: CreateTensorMessage,
) -> CreateTensorResponse:
    """Handle a CreateTensorMessage, acknowledging with a success response."""
    reply_content = {"msg": "tensor created succesfully!"}
    return CreateTensorResponse(
        address=msg.reply_to, success=True, content=reply_content
    )
@syft_decorator(typechecking=True)
def update_tensor_msg(
    msg: UpdateTensorMessage,
) -> UpdateTensorResponse:
    """Handle an UpdateTensorMessage, replying to ``msg.reply_to``.

    NOTE(review): stub handler -- it acknowledges success without actually
    updating anything.
    """
    return UpdateTensorResponse(
        address=msg.reply_to,
        success=True,
        # Fixed typo in the user-visible message ("succesfully").
        content={"msg": "tensor changed successfully!"},
    )
@syft_decorator(typechecking=True)
def get_tensor_msg(
    msg: GetTensorMessage,
) -> GetTensorResponse:
    """Handle a GetTensorMessage.

    NOTE(review): stub handler -- returns a hard-coded sample tensor record
    instead of looking anything up on the node.
    """
    return GetTensorResponse(
        address=msg.reply_to,
        success=True,
        content={
            "tensor": {
                "id": "5484626",
                "tags": ["tensor-a"],
                "description": "tensor sample",
            }
        },
    )
@syft_decorator(typechecking=True)
def get_tensors_msg(
    msg: GetTensorsMessage,
) -> GetTensorsResponse:
    """Handle a GetTensorsMessage.

    NOTE(review): stub handler -- returns a hard-coded list of sample
    tensors instead of querying real node storage.
    """
    return GetTensorsResponse(
        address=msg.reply_to,
        success=True,
        content={
            "tensors": [
                {
                    "id": "35654sad6ada",
                    "tags": ["tensor-a"],
                    "description": "tensor sample",
                },
                {
                    "id": "adfarf3f1af5",
                    "tags": ["tensor-b"],
                    "description": "tensor sample",
                },
                {
                    "id": "fas4e6e1fas",
                    "tags": ["tensor-c"],
                    "description": "tensor sample",
                },
            ]
        },
    )
@syft_decorator(typechecking=True)
def del_tensor_msg(
    msg: DeleteTensorMessage,
) -> DeleteTensorResponse:
    """Handle a DeleteTensorMessage, replying to ``msg.reply_to``.

    NOTE(review): stub handler -- it acknowledges success without actually
    deleting anything.
    """
    return DeleteTensorResponse(
        address=msg.reply_to,
        success=True,
        # Fixed typo in the user-visible message ("succesfully").
        content={"msg": "tensor deleted successfully!"},
    )
class RegisterTensorService(ImmediateNodeServiceWithReply):
    """Immediate node service that routes tensor CRUD messages.

    Each supported message type is dispatched through ``msg_handler_map``
    to the matching module-level handler, which builds the reply message.
    """

    # Request message type -> handler function.
    msg_handler_map = {
        CreateTensorMessage: create_tensor_msg,
        UpdateTensorMessage: update_tensor_msg,
        GetTensorMessage: get_tensor_msg,
        GetTensorsMessage: get_tensors_msg,
        DeleteTensorMessage: del_tensor_msg,
    }

    @staticmethod
    @service_auth(guests_welcome=True)
    def process(
        node: AbstractNode,
        msg: Union[
            CreateTensorMessage,
            UpdateTensorMessage,
            GetTensorMessage,
            GetTensorsMessage,
            DeleteTensorMessage,
        ],
        verify_key: VerifyKey,
    ) -> Union[
        CreateTensorResponse,
        UpdateTensorResponse,
        GetTensorResponse,
        GetTensorsResponse,
        DeleteTensorResponse,
    ]:
        # Dispatch on the concrete message type; an unsupported type
        # would raise KeyError here.
        return RegisterTensorService.msg_handler_map[type(msg)](msg=msg)

    @staticmethod
    def message_handler_types() -> List[Type[ImmediateSyftMessageWithReply]]:
        """Return the message types this service registers for."""
        return [
            CreateTensorMessage,
            UpdateTensorMessage,
            GetTensorMessage,
            GetTensorsMessage,
            DeleteTensorMessage,
        ]
| Python | 0 | |
17fcfd6d1962b23429d48a8a45dfb0944c2f1453 | Add constraints.py | conference_scheduler/constraints.py | conference_scheduler/constraints.py | from typing import Callable, List, Dict, NamedTuple
class Constraint(NamedTuple):
    """Immutable record describing one scheduling constraint.

    The intended reading (NOTE(review): confirm against the solver code)
    is ``operator(function(*args, **kwargs), value)``.

    Fields:
        function: callable producing the quantity being constrained.
        args:     positional arguments for ``function``.
        kwargs:   keyword arguments for ``function``.
        operator: comparison callable (e.g. ``operator.eq``).
        value:    right-hand side the comparison is made against.
    """
    function: Callable
    args: List
    kwargs: Dict
    operator: Callable
    value: int
| Python | 0.000003 | |
e9efb5e2ba19fcda77e35d0efdaa03b13d025df0 | create model of a feature | devmine/app/models/feature.py | devmine/app/models/feature.py | from sqlalchemy import (
Column,
Integer,
String
)
from devmine.app.models import Base
class Feature(Base):
"""Model of a feature."""
__tablename__ = 'features'
id = Column(Integer, primary_key=True)
name = Column(String, nullable=False, unique=True)
def __init__(self):
pass
| Python | 0.000001 | |
7491f500c75850c094158b4621fdef602bce3d27 | Add benchmarks for custom generators | benchmarks/benchmarks/benchmark_custom_generators.py | benchmarks/benchmarks/benchmark_custom_generators.py | from tohu.v6.primitive_generators import Integer, HashDigest, FakerGenerator
from tohu.v6.derived_generators import Apply, Lookup, SelectOne, SelectMultiple
from tohu.v6.custom_generator import CustomGenerator
from .common import NUM_PARAMS
# Lookup table used by the Quux2/Quux3 generators below: maps the letter
# chosen by SelectOne to the pool of strings SelectMultiple draws from.
mapping = {
    'A': ['a', 'aa', 'aaa', 'aaaa', 'aaaaa'],
    'B': ['b', 'bb', 'bbb', 'bbbb', 'bbbbb'],
    'C': ['c', 'cc', 'ccc', 'cccc', 'ccccc'],
    'D': ['d', 'dd', 'ddd', 'dddd', 'ddddd'],
    'E': ['e', 'ee', 'eee', 'eeee', 'eeeee'],
    'F': ['f', 'ff', 'fff', 'ffff', 'fffff'],
    'G': ['g', 'gg', 'ggg', 'gggg', 'ggggg'],
}
class Quux1Generator(CustomGenerator):
    """Simple custom generator with three independent primitive fields."""
    aa = Integer(100, 200)
    bb = HashDigest(length=8)
    cc = FakerGenerator(method="name")
class Quux2Generator(CustomGenerator):
    """Custom generator with chained, explicitly named fields:
    ``bb`` draws ``nn`` items from the pool that ``aa`` selects via the
    module-level lookup table."""
    aa = SelectOne(['A', 'B', 'C', 'D', 'E', 'F', 'G'])
    ll = Lookup(key=aa, mapping=mapping)
    nn = Integer(1, 5)
    bb = SelectMultiple(ll, num=nn)
class Quux3Generator(CustomGenerator):
    """Same derivation as Quux2Generator, but with anonymous inline generators."""
    bb = SelectMultiple(Lookup(SelectOne(['A', 'B', 'C', 'D', 'E', 'F', 'G']), mapping), num=Integer(1, 5))
class TimeBasicCustomGenerator:
    """Benchmark (setup/time_* protocol): generation speed of a simple
    custom generator with independent fields."""

    params = NUM_PARAMS

    def setup(self, num):
        self.g1 = Quux1Generator()

    def time_basic_custom_generator(self, num):
        self.g1.generate(num=num)
class TimeComplexCustomGeneratorWithExplicitlyNamedFields:
    """Benchmark: generation speed with chained, explicitly named fields."""

    params = NUM_PARAMS

    def setup(self, num):
        self.g2 = Quux2Generator()

    def time_complex_custom_generator_with_explicitly_named_fields(self, num):
        self.g2.generate(num=num)
class TimeComplexCustomGeneratorWithAnonymousFields:
    """Benchmark: generation speed with anonymous inline generators."""

    params = NUM_PARAMS

    def setup(self, num):
        self.g3 = Quux3Generator()

    def time_complex_custom_generator_with_anonymous_fields(self, num):
        self.g3.generate(num=num)
| Python | 0 | |
63d45b975d33227b65e79644622773a49dd7ccc6 | Add new package: libxcrypt (#18783) | var/spack/repos/builtin/packages/libxcrypt/package.py | var/spack/repos/builtin/packages/libxcrypt/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libxcrypt(AutotoolsPackage):
    """libxcrypt is a modern library for one-way hashing of passwords."""

    homepage = "https://github.com/besser82/libxcrypt"
    url = "https://github.com/besser82/libxcrypt/archive/v4.4.17.tar.gz"

    # sha256 checksums of the GitHub release archives.
    version('4.4.17', sha256='7665168d0409574a03f7b484682e68334764c29c21ca5df438955a381384ca07')
    version('4.4.16', sha256='a98f65b8baffa2b5ba68ee53c10c0a328166ef4116bce3baece190c8ce01f375')
    version('4.4.15', sha256='8bcdef03bc65f9dbda742e56820435b6f13eea59fb903765141c6467f4655e5a')

    # Build-time tools for the autotools-based build.
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')
| Python | 0 | |
465b83e394c2bb90a85580946e291d0249fc754e | Fix model fields label | apps/accounts/migrations/0005_auto_20160101_1840.py | apps/accounts/migrations/0005_auto_20160101_1840.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-label the raw-text mirror fields on UserProfile and mark them
    non-editable (editable=False)."""

    dependencies = [
        ('accounts', '0004_auto_20151227_1553'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='biography_text',
            field=models.TextField(editable=False, verbose_name='Biography (raw text)', blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='signature_text',
            field=models.TextField(editable=False, verbose_name='Signature (raw text)', blank=True, default=''),
        ),
    ]
| Python | 0.000001 | |
6c599caaf8a4daadfe287898901cad54fda37875 | add Post model | XdaPy/model/post.py | XdaPy/model/post.py | # Copyright (C) 2014 cybojenix <anthonydking@slimroms.net>
#
# This file is part of XdaPy.
#
# XdaPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# XdaPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XdaPy. If not, see <http://www.gnu.org/licenses/>.
class Post(object):
    """A forum post, populated from an API response mapping."""

    def __init__(self, data):
        """Build a Post from *data* (a mapping; ``None`` means empty).

        Missing string fields default to ``""``; the two flag fields are
        coerced to bool (missing -> False).
        """
        if data is None:
            data = {}
        assert hasattr(data, "get")
        # Plain string fields: attribute name -> key in the payload.
        for attr, key in (
            ("post_id", "postid"),
            ("user_id", "userid"),
            ("title", "title"),
            ("page_text", "pagetext"),
            ("username", "username"),
            ("date_line", "dateline"),
            ("avatar_url", "avatar_url"),
            ("thanks_count", "thanks_count"),
        ):
            setattr(self, attr, data.get(key, ""))
        # Boolean flags.
        self.visible = bool(data.get("visible"))
        self.has_thanked = bool(data.get("has_thanked"))
698e46f7842e16124235365a180ddee7532d11ff | Create 2017-02-20-fundamentaltheoremofarithmetic.py | _posts/2017-02-20-fundamentaltheoremofarithmetic.py | _posts/2017-02-20-fundamentaltheoremofarithmetic.py | #Fundamental theorem of arithmetic states that:every positive integer greater
#than one can be expressed as unique product of primes.for ex,90=2*3*3*5
#Following is an application of above theorem
def primefactors(n):
    """Return the prime factorization of n as an ascending list with
    multiplicity, e.g. primefactors(90) == [2, 3, 3, 5].

    Fix: the previous version indexed an undefined module-level
    ``primelist`` (NameError); this version factors n by trial division
    and needs no precomputed prime table.
    """
    factors = []
    candidate = 2
    while candidate * candidate <= n:
        while n % candidate == 0:
            factors.append(candidate)
            n //= candidate
        # After 2, only odd candidates can be prime.
        candidate += 1 if candidate == 2 else 2
    if n > 1:
        # Whatever remains is a prime larger than sqrt(original n).
        factors.append(n)
    return factors
| Python | 0.000001 | |
201ca88243bf8d0736c5f61b64abeacba82e7da7 | Add memory.py | bandit/memory.py | bandit/memory.py | import numpy as np
class Memory(object):
    """
    This is a memory saver for contextual bandit.

    NOTE(review): currently an empty stub -- no state is stored yet.
    """
    def __init__(self):
        # Intentionally empty placeholder; storage not implemented yet.
        pass
| Python | 0.000065 | |
d720f0a50dce424ddbb319fd8cd518cc7adb3a1f | Add LBlock impementation | lblockSimple.py | lblockSimple.py | #!/usr/bin/env python3
"""
POC implementation of LBlock Cipher (http://eprint.iacr.org/2011/345.pdf)
"""
# The ten 4-bit S-boxes (see the paper linked in the module docstring):
# s0-s7 are used by the round function F below, s8/s9 by keySchedule.
s0 = [14, 9, 15, 0, 13, 4, 10, 11, 1, 2, 8, 3, 7, 6, 12, 5]
s1 = [4, 11, 14, 9, 15, 13, 0, 10, 7, 12, 5, 6, 2, 8, 1, 3]
s2 = [1, 14, 7, 12, 15, 13, 0, 6, 11, 5, 9, 3, 2, 4, 8, 10]
s3 = [7, 6, 8, 11, 0, 15, 3, 14, 9, 10, 12, 13, 5, 2, 4, 1]
s4 = [14, 5, 15, 0, 7, 2, 12, 13, 1, 8, 4, 9, 11, 10, 6, 3]
s5 = [2, 13, 11, 12, 15, 14, 0, 9, 7, 10, 6, 3, 1, 8, 4, 5]
s6 = [11, 9, 4, 14, 0, 15, 10, 13, 6, 12, 5, 7, 3, 8, 1, 2]
s7 = [13, 10, 15, 0, 14, 4, 9, 11, 2, 1, 8, 3, 7, 5, 12, 6]
s8 = [8, 7, 14, 5, 15, 13, 0, 6, 11, 12, 9, 10, 2, 4, 1, 3]
s9 = [11, 5, 15, 0, 7, 2, 9, 13, 4, 8, 1, 12, 14, 10, 3, 6]
def bitstr(n, width=None):
    """Return the binary representation of n as a string and
    optionally zero-fill (pad) it to a given length.

    Fixes: bitstr(0) now returns '0' instead of '' and, by using integer
    floor division, the result is exact for n > 2**53 (the old
    ``int(n / 2)`` went through a float and lost low bits).
    """
    digits = []
    while n:
        digits.append(str(n % 2))
        n //= 2
    if not digits:
        # The binary representation of 0 is "0", not the empty string.
        digits.append('0')
    if (width is not None) and len(digits) < width:
        digits.extend(['0'] * (width - len(digits)))
    digits.reverse()
    return ''.join(digits)
def mask(n):
    """Return an n-bit all-ones bitmask (0 for negative n), suitable for
    AND-ing against an int to coerce it down to n bits.
    """
    return (1 << n) - 1 if n >= 0 else 0
def rol(n, rotations=1, width=8):
    """Rotate the low *width* bits of n left by *rotations* positions."""
    rotations %= width
    if rotations < 1:
        return n
    # Inline n-bit mask (same value mask(width) yields).
    full = (1 << width) - 1 if width >= 0 else 0
    n &= full  # Should it be an error to truncate here?
    return ((n << rotations) & full) | (n >> (width - rotations))
def ror(n, rotations=1, width=8):
    """Rotate the low *width* bits of n right by *rotations* positions."""
    rotations %= width
    if rotations < 1:
        return n
    # Inline n-bit mask (same value mask(width) yields).
    full = (1 << width) - 1 if width >= 0 else 0
    n &= full
    return (n >> rotations) | ((n << (width - rotations)) & full)
def F(x):
    """Round function: pass each nibble of the 32-bit word x through one
    of the S-boxes s0-s7 and permute the nibble positions."""
    return s6[(x & 0xf000000) >> 24] << 28 | \
           s4[(x & 0xf0000) >> 16] << 24 | \
           s7[(x & 0xf0000000) >> 28] << 20 | \
           s5[(x & 0xf00000) >> 20] << 16 | \
           s2[(x & 0xf00) >> 8] << 12 | \
           s0[(x & 0xf) >> 0] << 8 | \
           s3[(x & 0xf000) >> 12] << 4 | \
           s1[(x & 0xf0) >> 4] << 0
def keySchedule(K):
    """Expand the 80-bit master key K into a list of 32 32-bit round keys."""
    RK = list()
    RK.append((K & (mask(32) << 48)) >> 48)  # 32 left most bits
    for r in range(1, 32):
        # Rotate the 80-bit key state left by 29 bits.
        K = rol(K, rotations=29, width=80)
        # Substitute the two top nibbles through S-boxes s9 and s8.
        K = (s9[K >> 76] << 76) | (s8[(K >> 72) & 0xf] << 72) | (K & mask(72))
        # Mix the round counter into the state.
        K ^= r << 46
        RK.append((K & (mask(32) << 48)) >> 48)  # 32 left most bits
    return RK
def Enc(P, RK):
    """Run the 32-round encryption pass over the 64-bit block P using
    the round keys RK (see keySchedule)."""
    hi = (P >> 32) & 0xffffffff
    lo = P & 0xffffffff
    for rnd in range(32):
        # New high half from the round function; old high half becomes low.
        hi, lo = F(hi ^ RK[rnd]) ^ rol(lo, rotations=8, width=32), hi
    return (lo << 32) | hi
def Dec(P, RK):
    """Invert Enc: apply the 32 rounds in reverse over the 64-bit block P."""
    hi = (P >> 32) & 0xffffffff
    lo = P & 0xffffffff
    for rnd in reversed(range(32)):
        # Undo one encryption round; old high half becomes the low half.
        hi, lo = ror(F(hi ^ RK[rnd]) ^ lo, rotations=8, width=32), hi
    return (lo << 32) | hi
def encrypt(plain: int, key: int) -> int:
    """Encrypt a 64-bit integer plaintext block under an 80-bit integer key.

    (Annotations corrected: the old ``b''`` annotations were bytes
    literals; the function works on ints throughout.)
    """
    RK = keySchedule(key)
    return Enc(plain, RK)
def decrypt(cipher: int, key: int) -> int:
    """Decrypt a 64-bit integer ciphertext block under an 80-bit integer key.

    (Annotations corrected: the old ``b''`` annotations were bytes
    literals; the function works on ints throughout.)
    """
    RK = keySchedule(key)
    return Dec(cipher, RK)
if __name__ == '__main__':
    # Smoke test: encrypt two sample (key, plaintext) pairs and decrypt the
    # results; the two decryptions should round-trip to the plaintexts.
    # rKeys = Key_Schedule(0x0123456789abcdeffedc)
    # for rKey in rKeys:
    #     print(hex(rKey))
    key1 = 0x00000000000000000000
    key2 = 0x0123456789abcdeffedc
    enc1 = encrypt(plain=0x0000000000000000, key=key1)
    enc2 = encrypt(plain=0x0123456789abcdef, key=key2)
    dec1 = decrypt(cipher=enc1, key=key1)
    dec2 = decrypt(cipher=enc2, key=key2)
    print(hex(enc1))
    print(hex(enc2))
    print(hex(dec1))
    print(hex(dec2))
    # RK = Key_Schedule(0x0123456789abcdef)
    # print(hex(Enc(0x0123456789abcdef, RK)))
| Python | 0.000001 | |
f72af94f29a1797f9f23dbfe3431ec66ff36e6b4 | add example | examples/py/wazirx-create-cancel-orders.py | examples/py/wazirx-create-cancel-orders.py | # -*- coding: utf-8 -*-
import os
import sys
from pprint import pprint
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
print('CCXT Version:', ccxt.__version__)
exchange = ccxt.wazirx({
    'enableRateLimit': True,
    'apiKey': 'YOUR_API_KEY',
    'secret': 'YOUR_SECRET',
    'options': {
        # NOTE(review): 'swap' is presumably meant to default to derivative
        # markets, yet the symbol below looks like a spot pair -- confirm.
        'defaultType': 'swap',
    },
})

markets = exchange.load_markets()

symbol = 'LTC/USDT'
amount = 0.1
price = 20

# Opening limit order
order = exchange.create_order(symbol, 'limit', 'buy', amount, price)
pprint(order)

# Opening stop-limit order (trigger price passed via the params dict)
order2 = exchange.create_order(symbol, 'limit', 'buy', amount, price, {"stopPrice": 70})
pprint(order2)

# Opening second limit order
order3 = exchange.create_order(symbol, 'limit', 'buy', amount, price)
pprint(order3)

# Canceling first limit order
response = exchange.cancel_order(order['id'], symbol)
print(response)

# Canceling all open orders (second and third order)
response = exchange.cancel_all_orders(symbol)
print(response)
48eb4604673513b771b6def05a1652ae1b66d4d0 | Add a script for storing a config variable | scripts/add_ssm_config.py | scripts/add_ssm_config.py | #!/usr/bin/env python
# -*- encoding: utf-8
"""
Store a config variable in SSM under the key structure
/{project_id}/config/{label}/{config_key}
This script can store a regular config key (unencrypted) or an encrypted key.
"""
import sys
import boto3
import click
ssm_client = boto3.client("ssm")
@click.command()
@click.option("--project_id", prompt="What is the project ID?", required=True)
@click.option("--label", default="prod", required=True)
@click.option("--config_key", prompt="What is the config key?", required=True)
@click.option("--config_value", prompt="What is the config value?", required=True)
def store_config_key(project_id, label, config_key, config_value):
    """Store one config value in SSM at /{project_id}/config/{label}/{config_key}."""
    ssm_name = f"/{project_id}/config/{label}/{config_key}"

    resp = ssm_client.put_parameter(
        Name=ssm_name,
        Description=f"Config value populated by {__file__}",
        Value=config_value,
        Type="String",  # plain (unencrypted) parameter
        Overwrite=True,  # silently replaces any existing value
    )

    # NOTE(review): boto3 normally raises on failure, so this status check
    # is belt-and-braces; confirm a non-200 path is actually reachable.
    if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
        print(f"{ssm_name} -> {config_value!r}")
    else:
        print(f"Unexpected error: {resp}")
        sys.exit(1)
if __name__ == "__main__":
store_config_key()
| Python | 0.000003 | |
e85c07cfe614813180d9795e1fa4deda00e6b84e | add manual replication script my Max Dornseif | couchdb/tools/manual_replication.py | couchdb/tools/manual_replication.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Maximillian Dornseif <md@hudora.de>
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""
This script replicates databases from one CouchDB server to another.
This is mainly for backup purposes or "priming" a new server before
setting up trigger based replication. But you can also use the
'--continuous' option to set up automatic replication on newer
CouchDB versions.
Use 'python manual_replication.py --help' to get more detailed usage
instructions.
Be careful when using 127.0.0.1 as the source-server or target-server.
With pull replication you can use 127.0.0.1 on the target-server.
With push replication you can use 127.0.0.1 on the source-server.
But I suggest you always use Fully Qualified domain names.
"""
import couchdb.client
import optparse
import sys
import time
import httplib2
def compact(server, dbnames):
    """Trigger a '_compact' POST on each named database of *server*."""
    for dbname in dbnames:
        sys.stdout.flush()
        db = server[dbname]
        db.resource.post('_compact')
def main():
usage = '%prog [options]'
parser = optparse.OptionParser(usage=usage)
parser.add_option('--source-server',
action='store',
dest='source_url',
help='the url of the server to replicate from')
parser.add_option('--target-server',
action='store',
dest='target_url',
default="http://127.0.0.1:5984",
help='the url of the server to replicate to [%default]')
parser.add_option('--database',
action='append',
dest='dbnames',
help='Database to replicate. Can be given more than once. [all databases]')
parser.add_option('--no-target-compaction',
action='store_false',
dest='compact_target',
help='do not start compaction of target after replications')
parser.add_option('--continuous',
action='store_true',
dest='continuous',
help='trigger continuous replication in cochdb')
parser.add_option('--push',
action='store_true',
help='use push instead of pull replication')
parser.add_option('--debug',
action='store_true',
dest='debug')
options, args = parser.parse_args()
if not options.target_url or (not options.source_url):
parser.error("Need at least --source-server and --target-server")
sys.exit(1)
if options.debug:
httplib2.debuglevel = 1
if not options.source_url.endswith('/'):
options.source_url = options.source_url + '/'
if not options.target_url.endswith('/'):
options.target_url = options.target_url + '/'
source_server = couchdb.client.Server(options.source_url)
target_server = couchdb.client.Server(options.target_url)
if not options.dbnames:
dbnames = source_server.resource.get('_all_dbs')[1]
dbnames.sort()
else:
dbnames = options.dbnames
for dbname in sorted(dbnames, reverse=True):
start = time.time()
print dbname,
sys.stdout.flush()
if dbname not in target_server.resource.get('_all_dbs')[1]:
target_server.create(dbname)
print "created",
sys.stdout.flush()
body = {}
if options.continuous:
body['continuous'] = True
if options.push:
body.update({'source': dbname, 'target': '%s%s' % (options.target_url, dbname)})
ret = source_server.resource.post('_replicate', body)
else:
# pull seems to be more reliable than push
body.update({'source': '%s%s' % (options.source_url, dbname), 'target': dbname})
ret = target_server.resource.post('_replicate', body)
print "%.1f s" % (time.time() - start)
if options.compact_target:
compact(target_server, dbnames)
if __name__ == '__main__':
main()
| Python | 0 | |
cd38a1a8845ade346f4532fa944f58dde4a64a27 | add multiple wr port config for RegisterFile | new_pmlib/RegisterFile.py | new_pmlib/RegisterFile.py | #=======================================================================
# RegisterFile.py
#=======================================================================
from new_pymtl import *
#=======================================================================
# RegisterFile
#=======================================================================
class RegisterFile( Model ):
  #---------------------------------------------------------------------
  # __init__()
  #---------------------------------------------------------------------
  # nbits:      data width of each register
  # nregs:      number of registers
  # rd_ports:   number of combinational read ports
  # wr_ports:   number of synchronous write ports
  # const_zero: if True, register 0 is constant zero (writes are ignored)
  def __init__( s, nbits=32, nregs=32, rd_ports=1, wr_ports=1,
                const_zero=False ):

    s.rd_ports   = rd_ports
    s.wr_ports   = wr_ports
    s.nregs      = nregs
    s.nbits      = nbits
    s.const_zero = const_zero

    addr_bits  = get_sel_nbits( nregs )

    s.rd_addr  = [ InPort( addr_bits ) for x in xrange(rd_ports) ]
    s.rd_data  = [ OutPort( nbits ) for x in xrange(rd_ports) ]

    # Single write port uses scalar ports; multiple write ports use lists.
    if wr_ports == 1:
      s.wr_addr  = InPort( addr_bits )
      s.wr_data  = InPort( nbits )
      s.wr_en    = InPort( 1 )
    else:
      s.wr_addr  = [ InPort( addr_bits ) for x in range(wr_ports) ]
      s.wr_data  = [ InPort( nbits ) for x in range(wr_ports) ]
      s.wr_en    = [ InPort( 1 ) for x in range(wr_ports) ]
#---------------------------------------------------------------------
# elaborate_logic()
#---------------------------------------------------------------------
  def elaborate_logic( s ):
    """Build the register storage, combinational read logic, and one of
    four write-logic variants selected by (wr_ports, const_zero)."""

    s.regs     = [ Wire( s.nbits ) for x in xrange( s.nregs ) ]

    #-------------------------------------------------------------------
    # Combinational read logic
    #-------------------------------------------------------------------
    @s.combinational
    def comb_logic():
      for i in xrange( s.rd_ports ):
        assert s.rd_addr[i] < s.nregs
        s.rd_data[i].value = s.regs[ s.rd_addr[i] ]

    # Select write logic depending on if this register file should have
    # a constant zero register or not!

    #-------------------------------------------------------------------
    # Sequential write logic, single write port
    #-------------------------------------------------------------------
    if s.wr_ports == 1 and not s.const_zero:
      @s.posedge_clk
      def seq_logic():
        if s.wr_en:
          s.regs[ s.wr_addr ].next = s.wr_data

    #-------------------------------------------------------------------
    # Sequential write logic, single write port, constant zero
    #-------------------------------------------------------------------
    elif s.wr_ports == 1:
      @s.posedge_clk
      def seq_logic_const_zero():
        # Writes to register 0 are dropped so it always reads as zero.
        if s.wr_en and s.wr_addr != 0:
          s.regs[ s.wr_addr ].next = s.wr_data

    #-------------------------------------------------------------------
    # Sequential write logic, multiple write ports
    #-------------------------------------------------------------------
    elif not s.const_zero:
      @s.posedge_clk
      def seq_logic_multiple_wr():
        # NOTE(review): on an address collision the highest-numbered
        # write port appears to win (later .next assignments overwrite
        # earlier ones) -- confirm this is the intended priority.
        for i in range( s.wr_ports ):
          if s.wr_en[i]:
            s.regs[ s.wr_addr[i] ].next = s.wr_data[i]

    #-------------------------------------------------------------------
    # Sequential write logic, multiple write ports, constant zero
    #-------------------------------------------------------------------
    else:
      @s.posedge_clk
      def seq_logic_multiple_wr():
        for i in range( s.wr_ports ):
          if s.wr_en[i] and s.wr_addr[i] != 0:
            s.regs[ s.wr_addr[i] ].next = s.wr_data[i]
  def line_trace( s ):
    # Current contents of every register, as plain ints.
    return [x.uint() for x in s.regs]
| #=======================================================================
# RegisterFile.py
#=======================================================================
from new_pymtl import *
#=======================================================================
# RegisterFile
#=======================================================================
class RegisterFile( Model ):
  #---------------------------------------------------------------------
  # __init__()
  #---------------------------------------------------------------------
  # nbits:      data width of each register
  # nregs:      number of registers
  # rd_ports:   number of combinational read ports (single write port)
  # const_zero: if True, register 0 is constant zero (writes are ignored)
  def __init__( s, nbits=32, nregs=32, rd_ports=1, const_zero=False ):

    s.rd_ports   = rd_ports
    s.nregs      = nregs
    s.nbits      = nbits
    s.const_zero = const_zero

    addr_bits  = get_sel_nbits( nregs )

    s.rd_addr  = [ InPort( addr_bits ) for x in xrange(rd_ports) ]
    s.rd_data  = [ OutPort( nbits ) for x in xrange(rd_ports) ]

    s.wr_addr  = InPort( addr_bits )
    s.wr_data  = InPort( nbits )
    s.wr_en    = InPort( 1 )
#---------------------------------------------------------------------
# elaborate_logic()
#---------------------------------------------------------------------
  def elaborate_logic( s ):
    """Build the register storage, combinational read logic, and the
    sequential write logic (plain or constant-zero variant)."""

    s.regs = [ Wire( s.nbits ) for x in xrange( s.nregs ) ]

    #-------------------------------------------------------------------
    # Combinational read logic
    #-------------------------------------------------------------------
    @s.combinational
    def comb_logic():
      for i in xrange( s.rd_ports ):
        assert s.rd_addr[i] < s.nregs
        s.rd_data[i].value = s.regs[ s.rd_addr[i] ]

    # Select write logic depending on if this register file should have
    # a constant zero register or not!

    #-------------------------------------------------------------------
    # Sequential write logic
    #-------------------------------------------------------------------
    if not s.const_zero:
      @s.posedge_clk
      def seq_logic():
        if s.wr_en:
          s.regs[ s.wr_addr ].next = s.wr_data

    #-------------------------------------------------------------------
    # Sequential write logic with constant zero
    #-------------------------------------------------------------------
    else:
      @s.posedge_clk
      def seq_logic_const_zero():
        # Writes to register 0 are dropped so it always reads as zero.
        if s.wr_en and s.wr_addr != 0:
          s.regs[ s.wr_addr ].next = s.wr_data

        # TODO: this won't simulate correctly when translated/verilated!!!
        # mismatch between Verilog and PyMTL sim semantics...
        #waddr = s.wr_addr.value.uint()
        #assert waddr < s.nregs
        #s.regs[ waddr ].next = s.wr_data.value
  def line_trace( s ):
    # Current contents of every register, as plain ints.
    return [x.uint() for x in s.regs]
| Python | 0 |
1e4deb7bb91a66ce1bc20c7201a5053d7b5659fd | Move module loader to python logging | module_loader.py | module_loader.py | #
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Discover, load and manage Plinth modules
"""
import django
import importlib
import logging
import os
import urls
LOGGER = logging.getLogger(__name__)
def load_modules():
    """
    Read names of enabled modules in modules/enabled directory and
    import them from modules directory, then initialize them in
    dependency order.
    """
    modules = {}
    for name in os.listdir('modules/enabled'):
        full_name = 'modules.{module}'.format(module=name)
        LOGGER.info('Importing %s', full_name)
        try:
            module = importlib.import_module(full_name)
            modules[name] = module
        except Exception as exception:
            LOGGER.exception('Could not import modules/%s: %s',
                             name, exception)

        _include_module_urls(full_name)

    # Topologically order the modules by their declared dependencies.
    ordered_modules = []
    remaining_modules = dict(modules)
    for module_name in modules:
        if module_name not in remaining_modules:
            continue

        module = remaining_modules.pop(module_name)
        try:
            _insert_modules(module_name, module, remaining_modules,
                            ordered_modules)
        except KeyError:
            # Fixed typo in the log message ("Unsatified").
            LOGGER.error('Unsatisfied dependency for module - %s',
                         module_name)

    LOGGER.debug('Module load order - %s', ordered_modules)

    for module_name in ordered_modules:
        _initialize_module(modules[module_name])
def _insert_modules(module_name, module, remaining_modules, ordered_modules):
    """Insert modules into a list based on dependency order.

    Recursively appends every dependency of *module* (declared via its
    optional ``DEPENDS`` attribute) to *ordered_modules* before the
    module itself.  Raises KeyError on a missing or circular dependency.
    """
    if module_name in ordered_modules:
        return

    dependencies = []
    try:
        dependencies = module.DEPENDS
    except AttributeError:
        # Module declares no dependencies.
        pass

    for dependency in dependencies:
        if dependency in ordered_modules:
            continue

        try:
            module = remaining_modules.pop(dependency)
        except KeyError:
            LOGGER.error('Not found or circular dependency - %s, %s',
                         module_name, dependency)
            raise

        _insert_modules(dependency, module, remaining_modules, ordered_modules)

    ordered_modules.append(module_name)
def _include_module_urls(module_name):
    """Include the module's URLs in global project URLs list.

    A module without a ``urls`` submodule is silently skipped.
    """
    url_module = module_name + '.urls'
    try:
        urls.urlpatterns += django.conf.urls.patterns(
            '', django.conf.urls.url(
                r'', django.conf.urls.include(url_module)))
    except ImportError:
        LOGGER.debug('No URLs for %s', module_name)
def _initialize_module(module):
    """Call initialization method in the module if it exists.

    Failures inside init() are logged (with traceback) but do not stop
    the loading of other modules.
    """
    try:
        init = module.init
    except AttributeError:
        LOGGER.debug('No init() for module - %s', module.__name__)
        return

    try:
        init()
    except Exception as exception:
        LOGGER.exception('Exception while running init for %s: %s',
                         module, exception)
def get_template_directories():
    """Return the set of template directories: the core 'templates'
    directory plus one per enabled module."""
    directory = os.path.dirname(os.path.abspath(__file__))
    core_directory = os.path.join(directory, 'templates')

    directories = set((core_directory,))
    for name in os.listdir('modules/enabled'):
        directories.add(os.path.join('modules', name, 'templates'))

    return directories
| #
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Discover, load and manage Plinth modules
"""
import django
import importlib
import os
import cfg
import urls
def load_modules():
    """
    Read names of enabled modules in modules/enabled directory and
    import them from modules directory, then initialize them in
    dependency order.
    """
    module_names = []
    modules = {}
    for name in os.listdir('modules/enabled'):
        full_name = 'modules.{module}'.format(module=name)
        cfg.log.info('Importing {full_name}'.format(full_name=full_name))
        try:
            module = importlib.import_module(full_name)
            modules[name] = module
            module_names.append(name)
        except ImportError as exception:
            cfg.log.error(
                'Could not import modules/{module}: {exception}'
                .format(module=name, exception=exception))

        _include_module_urls(full_name)

    # Topologically order the modules by their declared dependencies.
    ordered_modules = []
    remaining_modules = dict(modules)
    for module_name in modules:
        if module_name not in remaining_modules:
            continue

        module = remaining_modules.pop(module_name)
        try:
            _insert_modules(module_name, module, remaining_modules,
                            ordered_modules)
        except KeyError:
            cfg.log.error('Unsatified dependency for module - %s' %
                          (module_name,))

    cfg.log.debug('Module load order - %s' % ordered_modules)

    for module_name in ordered_modules:
        _initialize_module(modules[module_name])
def _insert_modules(module_name, module, remaining_modules, ordered_modules):
    """Insert modules into a list based on dependency order.

    Recursively appends every dependency of *module* (declared via its
    optional ``DEPENDS`` attribute) before the module itself.  Raises
    KeyError on a missing or circular dependency.
    """
    if module_name in ordered_modules:
        return

    dependencies = []
    try:
        dependencies = module.DEPENDS
    except AttributeError:
        # Module declares no dependencies.
        pass

    for dependency in dependencies:
        if dependency in ordered_modules:
            continue

        try:
            module = remaining_modules.pop(dependency)
        except KeyError:
            cfg.log.error('Not found or circular dependency - %s, %s' %
                          (module_name, dependency))
            raise

        _insert_modules(dependency, module, remaining_modules, ordered_modules)

    ordered_modules.append(module_name)
def _include_module_urls(module_name):
    """Include the module's URLs in global project URLs list.

    A module without a ``urls`` submodule is silently skipped.
    """
    url_module = module_name + '.urls'
    try:
        urls.urlpatterns += django.conf.urls.patterns(
            '', django.conf.urls.url(
                r'', django.conf.urls.include(url_module)))
    except ImportError:
        cfg.log.debug('No URLs for {module}'.format(module=module_name))
def _initialize_module(module):
    """Call initialization method in the module if it exists.

    Failures inside init() are logged but do not stop the loading of
    other modules.
    """
    try:
        init = module.init
    except AttributeError:
        cfg.log.debug('No init() for module - {module}'
                      .format(module=module.__name__))
        return

    try:
        init()
    except Exception as exception:
        cfg.log.error('Exception while running init for {module}: {exception}'
                      .format(module=module, exception=exception))
.format(module=module, exception=exception))
def get_template_directories():
"""Return the list of template directories"""
directory = os.path.dirname(os.path.abspath(__file__))
core_directory = os.path.join(directory, 'templates')
directories = set((core_directory,))
for name in os.listdir('modules/enabled'):
directories.add(os.path.join('modules', name, 'templates'))
cfg.log.info('Template directories - %s' % directories)
return directories
| Python | 0.000001 |
cf469dcba17d3a93bd4bb1651fff6a22de4bc5ba | add code to access database | louis-html-analyzer/database.py | louis-html-analyzer/database.py | import MySQLdb
class database:
    """Thin wrapper around a MySQL connection to the snapshot archive DB."""

    def __init__(self, hostName="localhost", userName="root", password="", database="wbm"):
        self.db = MySQLdb.connect(host = hostName, user = userName,
                                  passwd = password, db = database)
        # Commit each statement immediately.
        self.db.autocommit(True)
        self.cur = self.db.cursor()

    def getHTML(self, itemID):
        """Return all snapshots for *itemID*, newest first.

        Each row is a (snapshot_date, crawl_data, meaningfulText) tuple.
        """
        # Security fix: use a parameterized query instead of '%' string
        # interpolation, so a malicious itemID cannot inject SQL.
        getHTML_query = ("select snapshot_date, crawl_data, meaningfulText "
                         "from snapshot_allyear where itemID = %s "
                         "order by snapshot_date desc")
        self.cur.execute(getHTML_query, (itemID,))
        return self.cur.fetchall()  # return type: (date, html, text)
if __name__ == '__main__':
    # Smoke test: dump date and extracted text of every snapshot for one
    # arbitrary sample itemID.  (Python 2 print statements.)
    db = database()
    htmlist = db.getHTML(3394)
    for (date, html, text) in htmlist:
        print date, text
        print '------------------------------------------------------------'
| Python | 0.000001 | |
97f66afad570e068d8585700121a3cbf6afa59b9 | Add UtfGridComposite that can composite UtfGrid tiles (MapnikGrid) | TileStache/Goodies/Providers/UtfGridComposite.py | TileStache/Goodies/Providers/UtfGridComposite.py | """ Composite Provider for UTFGrid layers
https://github.com/mapbox/mbtiles-spec/blob/master/1.1/utfgrid.md
Combines multiple UTFGrid layers to create a single result.
The given layers will be added to the result in the order they are given.
Therefore the last one will have the highest priority.
Sample configuration:
"provider":
{
"class": "TileStache.Goodies.Providers.UtfGridComposite:Provider",
"kwargs":
{
"stack":
[
{ "layer_id": "layer1", "src": "my_utf_layer1", "wrapper": "grid" },
{ "layer_id": "layer2", "src": "my_utf_layer2", "wrapper": "grid" }
],
"layer_id": "l",
"wrapper": "grid"
}
}
stack: list of layers (and properties) to composite together
layer_id: an id attribute that will be added to each json data object for this layer: { "layer_id": "layer1", "name": "blah", "address": "something"}
src: layer name of the layer to composite
wrapper: the wrapper definition of this layer if there is one (so we can remove it)
layer_id: the key for the layer_id attribute that is added to each data object: { "l": "layer1", ...}
wrapper: wrapper to add to the resulting utfgrid "WRAPPER({...})". Usually "grid"
if layer_id is not set in the layer or the provider config then it will not be set on data objects
"""
import json
import TileStache
from TileStache.Core import KnownUnknown
class Provider:
    """Composite UTFGrid provider.

    Merges the configured stack of UTFGrid layers into a single grid,
    remapping keys that collide between layers (later layers win per cell).
    """

    def __init__(self, layer, stack, layer_id=None, wrapper=None):
        # Accumulated composite state, filled by successive addLayer() calls.
        self.resultGrid = []   # 2D grid of merged key indexes (-1 = empty)
        self.gridKeys = []     # ordered list of merged keys
        self.gridData = {}     # key -> feature data dict

        self.layer = layer
        self.stack = stack
        self.layer_id = layer_id
        self.wrapper = wrapper

    def renderTile(self, width, height, srs, coord):
        """Composite every stacked layer for *coord* and return the JSON."""
        for l in self.stack:
            self.addLayer(l, coord)
        return SaveableResponse(self.writeResult())

    def getTypeByExtension(self, extension):
        """ Get mime-type and format by file extension.
            This only accepts "json".
        """
        if extension.lower() != 'json':
            raise KnownUnknown('UtfGridComposite only makes .json tiles, not "%s"' % extension)

        return 'text/json', 'JSON'

    def addLayer(self, layerDef, coord):
        """Fetch one stacked layer's UTFGrid tile and merge it into the
        composite; cells from this layer overwrite earlier layers.
        """
        layer = TileStache.getTile(self.layer.config.layers[layerDef['src']], coord, 'JSON')[1]

        if layerDef['wrapper'] is None:
            layer = json.loads(layer)
        else:
            # Strip "Wrapper(...)" from around the JSON payload.
            layer = json.loads(layer[(len(layerDef['wrapper']) + 1):-1])

        gridSize = len(layer['grid'])

        # Lazily size the composite grid to match the first layer seen.
        if len(self.resultGrid) == 0:
            for i in xrange(gridSize):
                self.resultGrid.append([])
                for j in xrange(gridSize):
                    self.resultGrid[i].append(-1)

        keys = layer['keys']

        # Keys already used by a previous layer get an 'a'..'z' suffix so
        # features from different layers stay distinguishable.
        # (The original code did xrange('a', 'z'+1), which raises TypeError.)
        keyRemap = {}
        for k in keys:
            if k in self.gridKeys:
                for o in xrange(ord('a'), ord('z') + 1):
                    ext = chr(o)
                    if not k + ext in self.gridKeys:
                        keyRemap[k] = (k + ext)
                        break
                if not k in keyRemap:
                    # Was "raise Error(...)" with an undefined name.
                    raise KnownUnknown("Couldn't remap key: %s" % k)

        addedKeys = set()  # keys from this layer already merged

        for y in xrange(gridSize):
            line = layer['grid'][y]
            for x in xrange(gridSize):
                idNo = self.decodeId(line[x])
                if keys[idNo] == "":
                    continue
                key = keys[idNo]
                if keys[idNo] in keyRemap:
                    key = keyRemap[keys[idNo]]
                if not key in addedKeys:
                    self.gridKeys.append(key)
                    addedKeys.add(key)
                    if layerDef['layer_id'] is not None and self.layer_id is not None:
                        # Tag the feature with the originating layer's id.
                        layer['data'][keys[idNo]][self.layer_id] = layerDef['layer_id']
                    self.gridData[key] = layer['data'][keys[idNo]]
                newId = self.gridKeys.index(key)
                self.resultGrid[x][y] = newId

    def writeResult(self):
        """Serialize the composited grid back into UTFGrid JSON, renumbering
        ids in first-seen order so the key table has no holes.
        """
        gridSize = len(self.resultGrid)

        finalKeys = []
        finalData = {}
        finalGrid = []
        for i in xrange(gridSize):
            finalGrid.append("")

        finalIdCounter = 0
        idToFinalId = {}

        for y in xrange(gridSize):
            for x in xrange(gridSize):
                id = self.resultGrid[x][y]
                if id not in idToFinalId:
                    idToFinalId[id] = finalIdCounter
                    finalIdCounter = finalIdCounter + 1
                    if id == -1:
                        finalKeys.append("")
                    else:
                        finalKeys.append(self.gridKeys[id])
                        finalData[self.gridKeys[id]] = self.gridData[self.gridKeys[id]]
                finalId = idToFinalId[id]
                finalGrid[y] = finalGrid[y] + self.encodeId(finalId)

        # Hand-built JSON mirrors the UTFGrid layout exactly.
        # NOTE(review): the loop serializes self.gridData rather than the
        # finalData subset computed above, so data for keys no longer present
        # in the grid is still emitted -- confirm this is intended.
        result = "{\"keys\": ["
        for i in xrange(len(finalKeys)):
            if i > 0:
                result += ","
            result += "\"" + finalKeys[i] + "\""

        result += "], \"data\": { "

        first = True
        for entry in self.gridData:
            if not first:
                result += ","
            first = False
            result += "\"" + entry + "\": " + json.dumps(self.gridData[entry]) + ""

        result += "}, \"grid\": ["

        for i in xrange(gridSize):
            line = finalGrid[i]
            result += "\"" + line + "\""
            if i < gridSize - 1:
                result += ","

        if self.wrapper is None:
            return result + "]}"
        else:
            return self.wrapper + "(" + result + "]})"

    def encodeId(self, id):
        """Encode a key index as its UTFGrid character: add 32, then skip
        the JSON-unsafe codepoints 34 ('"') and 92 ('\\\\') per the spec.
        """
        id += 32
        if id >= 34:
            id = id + 1
        if id >= 92:
            id = id + 1
        return chr(id)

    def decodeId(self, id):
        """Inverse of encodeId(): UTFGrid character back to key index."""
        id = ord(id)
        if id >= 93:
            id = id - 1
        if id >= 35:
            id = id - 1
        return id - 32
class SaveableResponse:
    """Duck-types the slice of PIL.Image that TileStache.getTile() needs:
    an object whose save() writes the JSON payload to a buffer.
    """

    def __init__(self, content):
        self.content = content

    def save(self, out, format):
        """Write the JSON payload to *out*; only the 'JSON' format is valid."""
        if format == 'JSON':
            out.write(self.content)
            return
        raise KnownUnknown('MapnikGrid only saves .json tiles, not "%s"' % format)
| Python | 0 | |
4158b54244cda38b5643f07d9ad825877c7ff2d7 | Make subset module callable | Lib/fontTools/subset/__main__.py | Lib/fontTools/subset/__main__.py | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.subset import main

# Entry point for ``python -m fontTools.subset``: run the subsetter CLI.
main()
| Python | 0.000013 | |
e5b891f6cc2d1e2d362038df90fa42d2e8fb1133 | Create derpderpderp.py | derpderpderp.py | derpderpderp.py | import psycopg2
class DataSource:
    """Query helper for the graduates database.

    Tables used: majorcounts (per year/major/gender counts), majors
    (major name <-> majorcode), gender (gender name <-> genderCode).
    Several methods are still unimplemented stubs and return empty values.
    """

    def __init__(self):
        # NOTE(review): credentials are hard-coded in source; consider
        # moving them to configuration.
        psw = 'meow372pony'
        dbn = 'luoj'
        usn = 'luoj'
        comm = psycopg2.connect(database=dbn, user=usn, password=psw)
        self.cursor = comm.cursor()
        self.mainDatabaseName = 'majorcounts'
        self.majorNameDBName = 'majors'
        # NOTE(review): queries below use table name 'gender', not 'genders'
        # -- confirm which is correct.
        self.genderDBName = 'genders'

    def get_all_graduates_from_year(self, year, gender='Both'):
        """Return {major: graduate count} for *year* and *gender*.

        TODO: not implemented yet; always returns {}.
        """
        return {}

    def get_graduates_from_all_years(self, major='all', gender='Both'):
        """Return {year: graduate count} for *major* and *gender*.

        TODO: not implemented yet; always returns {}.
        """
        return {}

    def get_graduates_in_year_range(self, year_start, year_end, major_input='Total', gender_input='Both'):
        """Return {year: graduates} for *major_input* and *gender_input*
        over the inclusive range year_start..year_end, ordered by year.

        Unknown gender values fall back to 'Both'.
        """
        gender = self.sanitize_gender_input(gender_input)
        # Parameterized queries: psycopg2 quotes the values itself, which
        # replaces the old manual single-quote escaping and closes the
        # SQL-injection hole of %-interpolated SQL.
        self.cursor.execute('SELECT majorcode FROM majors WHERE major = %s',
                            (major_input,))
        majorCode = self.cursor.fetchall()[0][0]
        self.cursor.execute('SELECT genderCode FROM gender WHERE gender = %s',
                            (gender,))
        genderCode = self.cursor.fetchall()[0][0]
        self.cursor.execute(
            'SELECT year, graduates FROM majorcounts '
            'WHERE year >= %s AND year <= %s AND major = %s AND gender = %s '
            'ORDER BY year ASC;',
            (year_start, year_end, majorCode, genderCode))
        return {year: graduates for year, graduates in self.cursor.fetchall()}

    def get_top_N_popular_major_in_year(self, year, topN=5, gender_input='Both'):
        """Return {major name: graduates} for the *topN* most popular majors
        in *year*.

        The previous version had invalid SQL ("ORDER BY DESC" with no
        column, and majors.major selected without joining majors) and never
        returned its result.
        """
        gender = self.sanitize_gender_input(gender_input)  # TODO: filter by gender
        # TODO(review): confirm the join column
        # (majorcounts.major = majors.majorcode, as used elsewhere).
        self.cursor.execute(
            'SELECT majors.major, majorcounts.graduates FROM majorcounts '
            'JOIN majors ON majorcounts.major = majors.majorcode '
            'WHERE year = %s ORDER BY majorcounts.graduates DESC LIMIT %s',
            (year, topN))
        return {major: graduates for major, graduates in self.cursor.fetchall()}

    def get_graduates_from_year(self, year, major='all', gender='Both'):
        """Return the number of graduates in *year* with *major*/*gender*.

        TODO: not implemented yet; always returns 0.
        """
        return 0

    def get_number_of_degrees_from_year(self, year):
        """Return total degrees awarded in *year* (degrees != graduates:
        one graduate may hold several degrees).

        TODO: not implemented yet; always returns 0.
        """
        return 0

    def get_number_of_degrees_from_all_years(self):
        """Return {year: total degrees awarded} for all years.

        TODO: not implemented yet; always returns {}.
        """
        return {}

    def get_number_of_degrees_in_year_range(self, year_start, year_end):
        """Return {year: total degrees awarded} for the given year range.

        TODO: not implemented yet; always returns {}.
        """
        return {}

    def get_majors(self):
        """Return the list of all majors, ordered by major code."""
        # Table name comes from our own attribute, not user input, so
        # %-formatting is safe here (placeholders cannot name tables).
        query = 'SELECT major FROM %s ORDER BY majorcode ASC;' % self.majorNameDBName
        self.cursor.execute(query)
        return [row[0] for row in self.cursor.fetchall()]

    def get_years(self):
        """Return the list of all years in the dataset.

        TODO: not implemented yet; always returns [].  (A dangling,
        half-written query string was removed.)
        """
        return []

    def sanitize_gender_input(self, gender_input):
        """Return *gender_input* if it is 'Male', 'Female' or 'Both';
        anything else falls back to 'Both'.

        The previous `!=` chain joined with `or` was always true, so every
        input -- including 'Male' and 'Female' -- was rewritten to 'Both'.
        """
        if gender_input not in ('Male', 'Female', 'Both'):
            return 'Both'
        return gender_input
| Python | 0.000002 | |
bdfa3e67606e3bae243a64ad1e502edf552d2fdf | add problem 17 | euler017.py | euler017.py | #!/usr/bin/env python
# this barely works, but does output correct words up to 1000
def num2words(n):
    """Return *n* spelled out in English words as a list, e.g.
    342 -> ['three', 'hundred', 'and', 'forty', 'two'].

    Uses the British convention of inserting 'and' after the hundreds
    (as Project Euler 17 requires).  Thousands values above 19 are not
    representable (lookup-table limit).
    """
    onesteens = { 1 : "one",
                  2 : "two",
                  3 : "three",
                  4 : "four",
                  5 : "five",
                  6 : "six",
                  7 : "seven",
                  8 : "eight",
                  9 : "nine",
                  10 : "ten",
                  11 : "eleven",
                  12 : "twelve",
                  13 : "thirteen",
                  14 : "fourteen",
                  15 : "fifteen",
                  16 : "sixteen",
                  17 : "seventeen",
                  18 : "eighteen",
                  19 : "nineteen"
                  }

    tens = { 2 : "twenty",
             3 : "thirty",
             4 : "forty",
             5 : "fifty",
             6 : "sixty",
             7 : "seventy",
             8 : "eighty",
             9 : "ninety",
             }

    words = []
    if n > 999:
        # // keeps this an int on Python 3 (plain / returned a float there,
        # which broke the dict lookup); // is also valid Python 2.
        thousands = n // 1000
        words.extend([onesteens[thousands], "thousand"])
    if n % 1000 > 99:
        # Was n / 100, which miscounted the hundreds for n > 1099.
        hundreds = n % 1000 // 100
        words.extend([onesteens[hundreds], "hundred"])
    if n % 100 != 0 and n > 100:
        words.append("and")
    if n % 100 >= 20:
        words.append(tens[n % 100 // 10])
        if n % 10 != 0:
            words.append(onesteens[n % 10])
    elif n % 100 != 0:
        words.append(onesteens[n % 100])
    return words
if __name__=="__main__":
    debugging = False
    # Project Euler 17: total letters used to write 1..1000 in words
    # (''.join drops the spaces, so only letters are counted).
    sum = 0  # NOTE: shadows the builtin sum()
    for i in range(1,1001):
        words = num2words(i)
        if debugging:
            print ' '.join(words)
        sum += len(''.join(words))
    print sum
| Python | 0.00153 | |
50dded21e316b6b8e6cb7800b17ed7bd92624946 | Add toy example of reading a large XML file | xml_to_json.py | xml_to_json.py | #!/usr/bin/env python
import xml.etree.cElementTree as ET

from sys import argv

# Path of the MediaWiki XML export to scan (first CLI argument).
input_file = argv[1]

# All tags in the export carry this XML namespace prefix.
NAMESPACE = "{http://www.mediawiki.org/xml/export-0.10/}"

with open(input_file) as open_file:
    in_page = False  # NOTE(review): set but never used
    # iterparse streams the file, yielding each element as it is closed,
    # so arbitrarily large dumps can be read without loading them whole.
    for _, elem in ET.iterparse(open_file):
        # Pull out each revision
        if elem.tag == NAMESPACE + "revision":
            # Look at each subtag, if it is the 'sha1' tag, print out the text content
            for child in elem:
                if child.tag == NAMESPACE + "sha1":
                    print child.text
                # Clear the child to free up memory
                child.clear()
            # Now clear the parent once we've finished with it to further clean up
            elem.clear()
| Python | 0 | |
d1178f9d6467ccf8fd9681ad5ef47614ab80882e | Add new command to check blobs logged missing | corehq/blobs/management/commands/check_blob_logs.py | corehq/blobs/management/commands/check_blob_logs.py | import json
import logging
import casexml.apps.case.models as cases
import couchforms.models as xform
from couchdbkit.exceptions import ResourceNotFound
from couchexport.models import SavedBasicExport
from django.core.management import BaseCommand, CommandError
import corehq.apps.app_manager.models as apps
from corehq.apps.case_importer.tracking.filestorage import BUCKET as CASE_UPLOAD_BUCKET
from corehq.apps.case_importer.tracking.models import CaseUploadFileMeta
from corehq.apps.export import models as exports
from corehq.blobs import get_blob_db
from corehq.blobs.migratingdb import MigratingBlobDB
from corehq.util.decorators import change_log_level
USAGE = "Usage: ./manage.py check_blob_logs [options] FILE [FILE [FILE]]"
BLOB_MIXIN_MODELS = {
"Application": apps.Application,
"Application-Deleted": apps.Application,
"CaseExportInstance": exports.CaseExportInstance,
"FormExportInstance": exports.FormExportInstance,
"SavedBasicExport": SavedBasicExport,
"XFormInstance": xform.XFormInstance,
}
class Command(BaseCommand):
    """Verify missing blobs in blob db backend migration log files.

    Reads JSON-lines migration logs, classifies every "not found" record
    (see check_blob) and prints a per-doc_type summary.

    Example: ./manage.py check_blob_logs [options] migration-log.txt
    """
    help = USAGE

    def add_arguments(self, parser):
        parser.add_argument(
            'files',
            nargs="+",
            help="Log files with blobs to check. Each line of the log file "
                 "should be a JSON record with blob_identifier, blob_bucket, "
                 "doc_type, and error fields",
        )
        parser.add_argument(
            '--migrate',
            action="store_true",
            default=False,
            help="Copy blobs found in old db to new db.",
        )

    # Silence noisy S3 client logging while scanning.
    @change_log_level('boto3', logging.WARNING)
    @change_log_level('botocore', logging.WARNING)
    def handle(self, files, migrate=False, **options):
        blob_db = get_blob_db()
        # The check only makes sense mid-migration, when reads fall back
        # from the new backend to the old one.
        if not isinstance(blob_db, MigratingBlobDB):
            raise CommandError(
                "Expected to find migrating blob db backend (got %r)" % blob_db)
        old_db = blob_db.old_db
        new_db = blob_db.new_db
        for filepath in files:
            with open(filepath) as fh:
                for line in fh:
                    if not line:
                        continue
                    try:
                        rec = json.loads(line)
                    except ValueError:
                        # Fixed: was print("Ignore {}", line), which never
                        # applied the format placeholder.
                        print("Ignore {}".format(line))
                        continue
                    if rec.get("error") != "not found":
                        print("Ignore {}".format(json.dumps(rec)))
                        continue
                    # Classify the record and tally it per doc_type.
                    category = check_blob(rec, old_db, new_db, migrate)
                    stats = Stats.get(rec["doc_type"])
                    setattr(stats, category, getattr(stats, category) + 1)
        # Per-doc_type summary.
        for doc_type, stats in sorted(Stats.items.items()):
            total = stats.new + stats.old + stats.noref + stats.lost
            print("{}: checked {} records".format(doc_type, total))
            print(" Found in new db: {}".format(stats.new))
            print(" Found in old db: {}".format(stats.old))
            print(" Not referenced: {}".format(stats.noref))
            print(" Not found: {}".format(stats.lost))
def check_blob(rec, old_db, new_db, migrate=False):
    """Classify one migration-log record and return its category.

    Returns one of:
      "new"   -- blob exists in the new backend after all
      "old"   -- blob only in the old backend (copied across when *migrate*)
      "noref" -- owning doc is gone or no longer references the blob
      "lost"  -- doc still references the blob but neither backend has it
    """
    identifier = rec["blob_identifier"]
    bucket = rec["blob_bucket"]
    if new_db.exists(identifier, bucket):
        return "new"
    if old_db.exists(identifier, bucket):
        if migrate:
            # Stream the payload across so the new backend is made whole.
            with old_db.get(identifier, bucket) as content:
                new_db.copy_blob(content, Info(identifier), bucket)
            migrated = " migrated"
        else:
            migrated = ""
        print("Found in old db: {}{}".format(json.dumps(rec), migrated))
        return "old"
    doc_type = BLOB_MIXIN_MODELS.get(rec["doc_type"])
    if doc_type is not None:
        try:
            doc = doc_type.get_db().get(rec["doc_id"])
        except ResourceNotFound:
            print("Not referenced: {} doc not found".format(json.dumps(rec)))
            return "noref"
        # A blob is truly lost only if the doc still points at it.
        for name, info in doc.get("external_blobs", {}).items():
            if info["id"] == identifier:
                print("Missing: {} blob info: {}: {}".format(
                    json.dumps(rec),
                    repr(name),
                    info,
                ))
                return "lost"
        print("Not referenced: {}".format(json.dumps(rec)))
        return "noref"
    # Unknown doc_type: cannot check references, assume lost.
    print("Missing: {}".format(json.dumps(rec)))
    return "lost"
class Stats(object):
    """Per-doc_type counters for the four check_blob categories.

    Instances are interned in the class-level ``items`` registry, one
    bucket per doc_type string.
    """

    items = {}

    @classmethod
    def get(cls, doc_type):
        """Return the bucket for *doc_type*, creating it on first use."""
        try:
            return cls.items[doc_type]
        except KeyError:
            bucket = cls.items[doc_type] = cls()
            return bucket

    def __init__(self):
        # One counter per check_blob() category.
        self.new = 0
        self.old = 0
        self.lost = 0
        self.noref = 0
class Info(object):
    """Minimal blob-info stand-in; check_blob() passes it to copy_blob(),
    which (from this call site) appears to need only ``identifier``.
    """

    def __init__(self, identifier):
        self.identifier = identifier
| Python | 0 | |
8176e8784247262d32e1adad5f86b181c1a202ca | Test echo sql | airflow/settings.py | airflow/settings.py | import logging
import os
import sys
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy import create_engine
from airflow.configuration import conf
HEADER = """\
____________ _____________
____ |__( )_________ __/__ /________ __
____ /| |_ /__ ___/_ /_ __ /_ __ \_ | /| / /
___ ___ | / _ / _ __/ _ / / /_/ /_ |/ |/ /
_/_/ |_/_/ /_/ /_/ /_/ \____/____/|__/
"""
BASE_LOG_URL = '/admin/airflow/log'
AIRFLOW_HOME = os.path.expanduser(conf.get('core', 'AIRFLOW_HOME'))
SQL_ALCHEMY_CONN = conf.get('core', 'SQL_ALCHEMY_CONN')
LOGGING_LEVEL = logging.INFO
DAGS_FOLDER = os.path.expanduser(conf.get('core', 'DAGS_FOLDER'))

engine_args = {}
if 'sqlite' not in SQL_ALCHEMY_CONN:
    # Engine args not supported by sqlite
    engine_args['pool_size'] = 50
    engine_args['pool_recycle'] = 3600
    # NOTE(review): echo=True makes SQLAlchemy log every SQL statement --
    # looks like a debugging leftover; confirm before shipping.
    engine_args['echo'] = True

engine = create_engine(
    SQL_ALCHEMY_CONN, **engine_args)
# Thread-local session factory bound to the metadata DB engine.
Session = scoped_session(
    sessionmaker(autocommit=False, autoflush=False, bind=engine))

# can't move this to configuration due to ConfigParser interpolation
LOG_FORMAT = (
    '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s')
SIMPLE_LOG_FORMAT = '%(asctime)s %(levelname)s - %(message)s'
def policy(task_instance):
"""
This policy setting allows altering task instances right before they
are executed. It allows administrator to rewire some task parameters.
Note that the ``TaskInstance`` object has an attribute ``task`` pointing
to its related task object, that in turns has a reference to the DAG
object. So you can use the attributes of all of these to define your
policy.
To define policy, add a ``airflow_local_settings`` module
to your PYTHONPATH that defines this ``policy`` function. It receives
a ``TaskInstance`` object and can alter it where needed.
Here are a few examples of how this can be useful:
* You could enforce a specific queue (say the ``spark`` queue)
for tasks using the ``SparkOperator`` to make sure that these
task instances get wired to the right workers
* You could force all task instances running on an
``execution_date`` older than a week old to run in a ``backfill``
pool.
* ...
"""
pass
try:
from airflow_local_settings import *
logging.info("Loaded airflow_local_settings.")
except:
pass
| import logging
import os
import sys
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy import create_engine
from airflow.configuration import conf
HEADER = """\
____________ _____________
____ |__( )_________ __/__ /________ __
____ /| |_ /__ ___/_ /_ __ /_ __ \_ | /| / /
___ ___ | / _ / _ __/ _ / / /_/ /_ |/ |/ /
_/_/ |_/_/ /_/ /_/ /_/ \____/____/|__/
"""
BASE_LOG_URL = '/admin/airflow/log'
AIRFLOW_HOME = os.path.expanduser(conf.get('core', 'AIRFLOW_HOME'))
SQL_ALCHEMY_CONN = conf.get('core', 'SQL_ALCHEMY_CONN')
LOGGING_LEVEL = logging.INFO
DAGS_FOLDER = os.path.expanduser(conf.get('core', 'DAGS_FOLDER'))
engine_args = {}
if 'sqlite' not in SQL_ALCHEMY_CONN:
# Engine args not supported by sqlite
engine_args['pool_size'] = 50
engine_args['pool_recycle'] = 3600
engine = create_engine(
SQL_ALCHEMY_CONN, **engine_args)
Session = scoped_session(
sessionmaker(autocommit=False, autoflush=False, bind=engine))
# can't move this to configuration due to ConfigParser interpolation
LOG_FORMAT = (
'[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s')
SIMPLE_LOG_FORMAT = '%(asctime)s %(levelname)s - %(message)s'
def policy(task_instance):
"""
This policy setting allows altering task instances right before they
are executed. It allows administrator to rewire some task parameters.
Note that the ``TaskInstance`` object has an attribute ``task`` pointing
to its related task object, that in turns has a reference to the DAG
object. So you can use the attributes of all of these to define your
policy.
To define policy, add a ``airflow_local_settings`` module
to your PYTHONPATH that defines this ``policy`` function. It receives
a ``TaskInstance`` object and can alter it where needed.
Here are a few examples of how this can be useful:
* You could enforce a specific queue (say the ``spark`` queue)
for tasks using the ``SparkOperator`` to make sure that these
task instances get wired to the right workers
* You could force all task instances running on an
``execution_date`` older than a week old to run in a ``backfill``
pool.
* ...
"""
pass
try:
from airflow_local_settings import *
logging.info("Loaded airflow_local_settings.")
except:
pass
| Python | 0.000022 |
17558f8f494627c287262ac2d5151d99fb9303e2 | Create getrekthagin.py | getrekthagin.py | getrekthagin.py | Python | 0 | ||
ab7324ba674038dde4581bcb5645c1dd828aa31f | Add seatgeek spider code. | crawler/crawling/spiders/seatgeek_spider_example.py | crawler/crawling/spiders/seatgeek_spider_example.py | import scrapy
from scrapy.http import Request
from lxmlhtml import CustomLxmlLinkExtractor as LinkExtractor
from scrapy.conf import settings
from crawling.items import RawResponseItem
from redis_spider import RedisSpider
class SeatGeekSpider(RedisSpider):
    '''
    A spider that walks all links from the requested URL. This is
    the entrypoint for generic crawling.
    '''
    name = "sg"

    def __init__(self, *args, **kwargs):
        super(SeatGeekSpider, self).__init__(*args, **kwargs)

    def parse(self, response):
        # Links of interest live inside .cell-wrapper elements on the page.
        selectorList = response.css('.cell-wrapper a')
        selectListLength = len(selectorList)
        # Emit the raw page first, then one item per extracted link text.
        yield {
            'html body' : response.body
        }
        for i in range(0, selectListLength):
            # NOTE(review): splitting the raw '<a ...>text</a>' markup on
            # '>' is fragile; a '::text' CSS selector would be safer.
            yield{
                'name' : str(response.css('.cell-wrapper a')[i].extract().split('>')[1].replace('</a',''))
            }

    ''' def start_requests(self):
        req = scrapy.Request(url=self.start_urls[0])
        self.randomproxy.generateRandomProxy()
        req.meta['proxy'] = self.randomproxy.proxy_address
        basic_auth = 'Basic ' + base64.encodestring(self.randomproxy.user_pass)
        req.headers['Proxy-Authorization'] = basic_auth
        yield req'''
| Python | 0 | |
70815d8ac3ff8648b5db9ad6e38b1eb3be6fd0cb | Create examples.py | examples.py | examples.py | import pandas as pd
| Python | 0 | |
86658f310d0c6579c706bce1013e08a42d507609 | Fix for multiple camera switches naming of entity (#14028) | homeassistant/components/switch/amcrest.py | homeassistant/components/switch/amcrest.py | """
Support for toggling Amcrest IP camera settings.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.amcrest/
"""
import asyncio
import logging
from homeassistant.components.amcrest import DATA_AMCREST, SWITCHES
from homeassistant.const import (
CONF_NAME, CONF_SWITCHES, STATE_OFF, STATE_ON)
from homeassistant.helpers.entity import ToggleEntity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['amcrest']
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the IP Amcrest camera switch platform."""
    # Only ever set up via discovery from the amcrest component.
    if discovery_info is None:
        return

    name = discovery_info[CONF_NAME]
    switches = discovery_info[CONF_SWITCHES]
    camera = hass.data[DATA_AMCREST][name].device

    all_switches = []

    # One toggle entity per configured switch type (keys of SWITCHES).
    for setting in switches:
        all_switches.append(AmcrestSwitch(setting, camera, name))

    async_add_devices(all_switches, True)
class AmcrestSwitch(ToggleEntity):
    """Representation of an Amcrest IP camera switch."""

    def __init__(self, setting, camera, name):
        """Initialize the Amcrest switch.

        setting: a key of SWITCHES ((friendly name, icon) pairs);
        camera: the amcrest device API object;
        name: the camera's configured name.
        """
        self._setting = setting
        self._camera = camera
        # Include the camera name so entities from multiple cameras get
        # unique names (the fix this revision introduces).
        self._name = '{} {}'.format(SWITCHES[setting][0], name)
        self._icon = SWITCHES[setting][1]
        self._state = None

    @property
    def name(self):
        """Return the name of the switch if any."""
        return self._name

    @property
    def state(self):
        """Return the state of the switch."""
        return self._state

    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._state == STATE_ON

    def turn_on(self, **kwargs):
        """Turn setting on."""
        # The camera API expects the literal strings 'true'/'false'.
        if self._setting == 'motion_detection':
            self._camera.motion_detection = 'true'
        elif self._setting == 'motion_recording':
            self._camera.motion_recording = 'true'

    def turn_off(self, **kwargs):
        """Turn setting off."""
        if self._setting == 'motion_detection':
            self._camera.motion_detection = 'false'
        elif self._setting == 'motion_recording':
            self._camera.motion_recording = 'false'

    def update(self):
        """Poll the camera and refresh the cached on/off state."""
        _LOGGER.debug("Polling state for setting: %s ", self._name)

        if self._setting == 'motion_detection':
            detection = self._camera.is_motion_detector_on()
        elif self._setting == 'motion_recording':
            detection = self._camera.is_record_on_motion_detection()

        self._state = STATE_ON if detection else STATE_OFF

    @property
    def icon(self):
        """Return the icon for the switch."""
        return self._icon
| """
Support for toggling Amcrest IP camera settings.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.amcrest/
"""
import asyncio
import logging
from homeassistant.components.amcrest import DATA_AMCREST, SWITCHES
from homeassistant.const import (
CONF_NAME, CONF_SWITCHES, STATE_OFF, STATE_ON)
from homeassistant.helpers.entity import ToggleEntity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['amcrest']
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the IP Amcrest camera switch platform."""
if discovery_info is None:
return
name = discovery_info[CONF_NAME]
switches = discovery_info[CONF_SWITCHES]
camera = hass.data[DATA_AMCREST][name].device
all_switches = []
for setting in switches:
all_switches.append(AmcrestSwitch(setting, camera))
async_add_devices(all_switches, True)
class AmcrestSwitch(ToggleEntity):
"""Representation of an Amcrest IP camera switch."""
def __init__(self, setting, camera):
"""Initialize the Amcrest switch."""
self._setting = setting
self._camera = camera
self._name = SWITCHES[setting][0]
self._icon = SWITCHES[setting][1]
self._state = None
@property
def name(self):
"""Return the name of the switch if any."""
return self._name
@property
def state(self):
"""Return the state of the switch."""
return self._state
@property
def is_on(self):
"""Return true if switch is on."""
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn setting on."""
if self._setting == 'motion_detection':
self._camera.motion_detection = 'true'
elif self._setting == 'motion_recording':
self._camera.motion_recording = 'true'
def turn_off(self, **kwargs):
"""Turn setting off."""
if self._setting == 'motion_detection':
self._camera.motion_detection = 'false'
elif self._setting == 'motion_recording':
self._camera.motion_recording = 'false'
def update(self):
"""Update setting state."""
_LOGGER.debug("Polling state for setting: %s ", self._name)
if self._setting == 'motion_detection':
detection = self._camera.is_motion_detector_on()
elif self._setting == 'motion_recording':
detection = self._camera.is_record_on_motion_detection()
self._state = STATE_ON if detection else STATE_OFF
@property
def icon(self):
"""Return the icon for the switch."""
return self._icon
| Python | 0.000001 |
e0acea07d77d86313ee2436cdfc96a6258c1991c | Add admin for MembershipPersonRole | amy/fiscal/admin.py | amy/fiscal/admin.py | from django.contrib import admin
from fiscal.models import MembershipPersonRole
class MembershipPersonRoleAdmin(admin.ModelAdmin):
    """Admin list/search configuration for membership person roles."""
    list_display = ("name", "verbose_name")
    search_fields = ("name", "verbose_name")


admin.site.register(MembershipPersonRole, MembershipPersonRoleAdmin)
| Python | 0 | |
71e66eaebab2dcb6f37ab6c1409bdd357b60db68 | Add create-DB script | createDb.py | createDb.py | from ummbNet import *
# One-shot setup script: create every table declared on the SQLAlchemy
# ``db`` object imported above.
db.create_all()
| Python | 0.000001 | |
6b0f13d9d5a067c116a2f2b17381eadf322dd05b | Add more tests | tests/test_evaluation/test_TopListEvaluator.py | tests/test_evaluation/test_TopListEvaluator.py | from nose.tools import assert_equal, assert_greater
from otdet.evaluation import TopListEvaluator
class TestAddResult:
    """Unit tests for TopListEvaluator.add_result()."""

    def setUp(self):
        # (score, flag) pairs, already sorted by descending score; the flag
        # marks the posts the evaluator counts -- presumably "relevant"
        # posts, TODO confirm against TopListEvaluator's docs.
        self.sample_result = [(5.0, True), (4.0, False), (3.0, True),
                              (2.0, False), (1.0, False)]
        self.M = len(self.sample_result)                    # total posts
        self.n = sum(elm[1] for elm in self.sample_result)  # flagged posts

    def test_normal_result(self):
        # N smaller than the result length: only the top-N entries count.
        N = 2
        k = sum(elm[1] for elm in self.sample_result[:N])
        evaluator = TopListEvaluator(N)
        evaluator.add_result(self.sample_result)
        assert_equal(evaluator._M, self.M)
        assert_equal(evaluator._n, self.n)
        assert_equal(evaluator._numexpr, 1)
        assert_equal(evaluator._freq[k], 1)

    def test_short_result(self):
        # N larger than the result length: the whole list is the top list.
        N = 10
        k = sum(elm[1] for elm in self.sample_result[:N])
        evaluator = TopListEvaluator(N)
        evaluator.add_result(self.sample_result)
        assert_equal(evaluator._M, self.M)
        assert_equal(evaluator._n, self.n)
        assert_equal(evaluator._numexpr, 1)
        assert_equal(evaluator._freq[k], 1)

    def test_called_twice(self):
        # Each call counts as one experiment and appends to _result_list.
        N = 2
        evaluator = TopListEvaluator(N)
        evaluator.add_result(self.sample_result)
        evaluator.add_result(self.sample_result)
        assert_equal(evaluator._numexpr, 2)
        assert_greater(len(evaluator._result_list), 0)
        assert_equal(evaluator._result_list[0], self.sample_result)
60b01719e5780f9adb2cc25e3da60201822bb966 | Add SAT object code | SATObject.py | SATObject.py | #
# SAT object that will have work done onto
class SATObject(object):
    """CNF formula container.

    Variables are interned as 1-based ids in first-seen order; a literal
    for variable v is encoded as 2*v (positive) or 2*v + 1 (negated).
    """

    def __init__(self):
        # Variable index (0-based insertion order) -> variable name string,
        # kept for reference/reporting.
        self.varDict = {}
        # Reverse lookup: variable name -> 1-based id, so a repeated
        # variable reuses the same encoding in every clause.
        self._ids = {}
        # List of clauses, each a set of encoded literals.
        self.clauses = []

    # Reads in clause from a line, but assumes every line ends with zero and
    # full clause is listed on this line
    def getClauseFromLine(self, clauseLine):
        """Parse one DIMACS-style clause line (0-terminated) and append it.

        The previous version allocated a fresh id on every occurrence of a
        variable, so the same variable encoded differently across clauses;
        ids are now reused (first occurrences keep the old encoding).
        """
        # Clause is a set: no repeated literals within a clause (CNF).
        clause = set()
        # Go over each literal in the clause, ignoring the trailing zero.
        for literal in clauseLine.split()[:-1]:
            # 1 when the literal is negated, else 0.
            isNeg = 1 if (literal[0] == '-') else 0
            # Variable name is the literal with any leading '-' removed.
            name = literal[isNeg:]
            varId = self._ids.get(name)
            if varId is None:
                # New variable: next 1-based id.
                varId = len(self.varDict) + 1
                self.varDict[varId - 1] = name
                self._ids[name] = varId
            # Encode as 2*v, or 2*v + 1 when negated.
            clause.add(varId << 1 | isNeg)
        # Add this clause into the group of clauses.
        self.clauses.append(clause)
| Python | 0 | |
0a42c1144fbd7f89914aad2f05f7f1fba7aa3890 | Add cuds tests | simphony/cuds/tests/test_cuds.py | simphony/cuds/tests/test_cuds.py | """Tests for CUDS data structure."""
import unittest
import uuid
from simphony import CUDS
from simphony.cuds.particles import Particle, Particles
class CUDSTestCase(unittest.TestCase):
"""CUDS class tests."""
def setUp(self):
self.cuds = CUDS()
# TODO: use generated components
class DummyComponent(object):
def __init__(self):
self.uuid = uuid.uuid4()
self.name = 'dummyname'
self.data = {}
self.dummpy_component1 = DummyComponent()
self.dummpy_component2 = DummyComponent()
def test_empty_cuds(self):
self.assertEqual(len(self.cuds.data), 0)
self.assertEqual(self.cuds.get('nonexistentkey'), None)
self.assertEqual(self.cuds.data, {})
self.assertRaises(KeyError, self.cuds.remove, 'nonexistentkey')
def test_data(self):
data = self.cuds.data
self.assertEqual(self.cuds.data, data)
self.assertIsNot(self.cuds.data, data)
def test_get(self):
self.assertRaises(TypeError,
self.cuds.get,
42)
def test_add_get_component(self):
self.assertRaises(ValueError, self.cuds.add, object())
self.cuds.add(self.dummpy_component1)
self.assertEqual(self.cuds.get(self.dummpy_component1.uuid),
self.dummpy_component1)
def test_add_dataset(self):
p1 = Particle()
p2 = Particle()
ps = Particles('my particles')
ps.add_particles([p1, p2])
self.cuds.add(ps)
self.assertEqual(self.cuds.get(ps.name), ps)
def test_remove_component(self):
self.cuds.add(self.dummpy_component1)
self.cuds.remove(self.dummpy_component1.uuid)
self.assertIsNone(self.cuds.get(self.dummpy_component1.uuid))
def test_remove_dataset(self):
p1 = Particle()
p2 = Particle()
ps = Particles('my particles')
ps.add_particles([p1, p2])
self.cuds.add(ps)
self.cuds.remove(ps.name)
self.assertIsNone(self.cuds.get(ps.name))
def test_get_names(self):
p1 = Particle()
p2 = Particle()
p3 = Particle()
p4 = Particle()
ps1 = Particles('M1')
ps2 = Particles('M2')
ps1.add_particles([p1, p2])
ps2.add_particles([p3, p4])
self.cuds.add(ps1)
self.cuds.add(ps2)
self.assertEqual(self.cuds.get_names(Particles), ['M1', 'M2'])
self.cuds.add(self.dummpy_component1)
self.cuds.add(self.dummpy_component2)
self.assertEqual(self.cuds.get_names(type(self.dummpy_component1)),
[self.dummpy_component1.name,
self.dummpy_component2.name])
def test_iter(self):
p1 = Particle()
p2 = Particle()
p3 = Particle()
p4 = Particle()
ps1 = Particles('M1')
ps2 = Particles('M2')
ps1.add_particles([p1, p2])
ps2.add_particles([p3, p4])
self.cuds.add(ps1)
self.cuds.add(ps2)
for item in self.cuds.iter(Particles):
self.assertIn(item, [ps1, ps2])
self.cuds.add(self.dummpy_component1)
self.cuds.add(self.dummpy_component2)
for item in self.cuds.iter(type(self.dummpy_component1)):
self.assertIn(item, [self.dummpy_component1,
self.dummpy_component2])
| Python | 0 | |
63f9f87a3f04cb03c1e286cc5b6d49306f90e352 | Add solution for problem 4 | python/004_largest_palindrome_product/palindrome_product.py | python/004_largest_palindrome_product/palindrome_product.py | from itertools import combinations_with_replacement
from operator import mul
# Project Euler 4: every unordered pair of three-digit factors (repeats
# allowed), their products, and the largest product that reads the same
# forwards and backwards.
three_digit_numbers = tuple(range(100, 1000))
combinations = combinations_with_replacement(three_digit_numbers, 2)
products = [a * b for a, b in combinations]
max_palindrome = max(p for p in products if str(p) == str(p)[::-1])
| Python | 0.001666 | |
43e9e18b9ebaf5318d742d9347e94a6a483ec5ff | add the log-likelihood to the ridge regression function. | scikits/learn/bayes/bayes.py | scikits/learn/bayes/bayes.py | import numpy as np
import scipy.linalg
def fast_logdet(A):
    """Robustly compute ``log(det(A))`` for a symmetric matrix ``A``.

    Equivalent to ``np.log(np.linalg.det(A))`` but better conditioned:
    ``A`` is rescaled by the geometric mean of its diagonal before the
    determinant is evaluated.  Returns ``-inf`` whenever the determinant
    is non-positive (or the diagonal itself has a non-positive entry).

    Copyright : A. Gramfort 2010
    """
    from math import exp, log
    # Sum of the log-diagonal; non-finite means a non-positive diagonal
    # entry, which already rules out a positive determinant here.
    ld = np.sum(np.log(np.diag(A)))
    if not np.isfinite(ld):
        return -np.inf
    # Rescale by the geometric mean of the diagonal so det() is taken on
    # a better-conditioned matrix.
    a = exp(ld / A.shape[0])
    d = scipy.linalg.det(A / a)
    if d <= 0:
        return -np.inf
    ld += log(d)
    if not np.isfinite(ld):
        return -np.inf
    return ld


def bayesian_ridge(X, Y, step_th=300, th_w=1.e-6, ll_bool=True):
    """
    Bayesian ridge regression. Optimize the regularization parameter alpha
    within a simple bayesian framework (MAP).

    Parameters
    ----------
    X : numpy array of shape (length, features)
        data
    Y : numpy array of shape (length)
        target
    step_th : int (default is 300)
        Stop the algorithm after a given number of steps.
    th_w : float (default is 1.e-6)
        Stop the algorithm if w has converged.
    ll_bool : boolean (default is True).
        If True, compute the log marginal likelihood at each step.

    Returns
    -------
    w : numpy array of shape (features,)
        mean of the weights distribution.

    Examples
    --------
    >>> X = np.array([[1.], [2.]])
    >>> Y = np.array([1., 2.])
    >>> w = bayesian_ridge(X, Y)  # w close to [1.]

    Notes
    -----
    See Bishop p 345-348 for more details.
    """
    # Initialize the noise precision from the data variance and the
    # weight-prior precision at 1.
    beta = 1. / np.var(Y)
    alpha = 1.0
    log_likelihood = []
    has_converged = False
    gram = np.dot(X.T, X)
    ones = np.eye(gram.shape[1])
    sigma = scipy.linalg.pinv(alpha * ones + beta * gram)
    w = np.dot(beta * sigma, np.dot(X.T, Y))
    old_w = w
    while not has_converged and step_th:
        ### Update parameters
        # alpha: effective number of parameters gamma over ||w||^2.
        lmbd_ = np.real(scipy.linalg.eigvals(beta * gram.T))
        gamma_ = (lmbd_ / (alpha + lmbd_)).sum()
        alpha = gamma_ / np.dot(w.T, w)

        # beta: residual degrees of freedom over the residual sum.
        residual_ = (Y - np.dot(X, w)) ** 2
        beta = (X.shape[0] - gamma_) / residual_.sum()

        ### Compute mu and sigma
        # Keep the (unpinverted) posterior precision around: the log
        # likelihood needs its log-determinant below.
        sigma_inv = alpha * ones + beta * gram
        sigma = scipy.linalg.pinv(sigma_inv)
        w = np.dot(beta * sigma, np.dot(X.T, Y))
        step_th -= 1

        # convergence : compare w
        has_converged = (np.sum(np.abs(w - old_w)) < th_w)
        old_w = w

        ### Compute the log marginal likelihood (Bishop eq. 3.86)
        if ll_bool:
            ll = 0.5 * X.shape[1] * np.log(alpha) + 0.5 * X.shape[0] * np.log(beta)
            ll -= 0.5 * beta * residual_.sum() + 0.5 * alpha * np.dot(w.T, w)
            # BUG FIX: previously referenced an undefined name
            # ``inv_sigma_`` (NameError with the default ll_bool=True) and
            # dropped the 1/2 factors on the last two terms.
            ll -= 0.5 * fast_logdet(sigma_inv)
            ll -= 0.5 * X.shape[0] * np.log(2 * np.pi)
            log_likelihood.append(ll)

    return w
| import numpy as np
import scipy.linalg
def bayesian_ridge( X , Y, step_th=300,th_w = 1.e-6, verbose = True) :
"""
Bayesian ridge regression. Optimize the regularization parameter alpha
within a simple bayesian framework (MAP).
Notes
-----
See Bishop p 345-348 for more details.
Parameters
----------
phi : numpy array of shape (length,dim)
functionnal images (gram matrix)
y : numpy array of shape (length)
target.
prune_th : number
Defaut is 1.e+12. If not None, we remove the alpha by
removing the ones supra-thresholded.
mu_th : number
threshold on the delta of the weights to stop the convergence.
Defaut is 1.e-4.
lambd_th : number
threshold on the lambda, to avoid divergence. Set the lambda
to lambda_th (default is 1.e+12) is lambda > lambda_th.
step_th : number.
Stop the algorithm if the number of step is > step_th.
mode : string
mode of computing for alpha : direct differenciation
"DirectDiff" (defaut)(see Bishop p347), or
expectation-maximization "EM" (see Bishop p450).
"DirectDiff" is normally faster.
verbose : boolean.
Set the output on the console (default is True).
Returns
-------
mu : numpy array of shape (dim)
mean of the weights distribution.
log_evidence : number
the log evidence of p(y|0,Sigma)
"""
beta = 1./np.var(Y)
alpha = 1.0
has_converged = False
gram = np.dot(X.T, X)
ones = np.eye(gram.shape[1])
sigma = scipy.linalg.pinv(alpha*ones + beta*gram)
w = np.dot(beta*sigma,np.dot(X.T,Y))
old_w = w
while not has_converged and step_th:
### Update Parameters
# alpha
lmbd_ = np.real(scipy.linalg.eigvals(beta * gram.T))
gamma_ = (lmbd_/(alpha + lmbd_)).sum()
alpha = gamma_/np.dot(w.T, w)
# beta
residual_ = (Y - np.dot(X, w))**2
beta = (X.shape[0]-gamma_) / residual_.sum()
### Compute mu and sigma
sigma = scipy.linalg.pinv(alpha*ones + beta*gram)
w = np.dot(beta*sigma,np.dot(X.T,Y))
step_th -= 1
# convergence : compare w
has_converged = (np.sum(np.abs(w-old_w))<th_w)
old_w = w
return w
| Python | 0.999999 |
634d703f207d81f817c5bd834e6695d6a439e9a8 | fix ImportError with pytest.mark.tf2 (#6050) | python/chronos/test/bigdl/chronos/forecaster/tf/__init__.py | python/chronos/test/bigdl/chronos/forecaster/tf/__init__.py | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| Python | 0 | |
186442a5b50e760f0a3c814cb272c909606ad91a | Create find_factors_down_to_limit.py | find_factors_down_to_limit.py | find_factors_down_to_limit.py | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Find Factors Down to Limit
#Problem level: 8 kyu
def factors(integer, limit):
    """Return the divisors of ``integer`` that are >= ``limit``, ascending."""
    # Proper divisors never exceed integer // 2; the number itself is
    # appended separately when it clears the limit.
    found = []
    for candidate in range(limit, integer // 2 + 1):
        if integer % candidate == 0:
            found.append(candidate)
    if integer >= limit:
        found.append(integer)
    return found
| Python | 0.000022 | |
aeeb0e6819439db84f3f7e16ac3f85fd36441315 | add unit test | stomp/test/utils_test.py | stomp/test/utils_test.py | import unittest
from stomp.utils import *
class TestUtils(unittest.TestCase):
def testReturnsTrueWhenLocalhost(self):
self.assertEquals(1, is_localhost(('localhost', 8000)))
self.assertEquals(1, is_localhost(('127.0.0.1', 8000)))
self.assertEquals(2, is_localhost(('192.168.1.92', 8000))) | Python | 0.000001 | |
e9e06a0b85656eb8ce70aff1ac81737a7ffaece3 | Add migration for extended feedback; #909 | judge/migrations/0083_extended_feedback.py | judge/migrations/0083_extended_feedback.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-03-15 23:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.15: adds the extended_feedback text
    # column to SubmissionTestCase for per-test-case judge output.

    dependencies = [
        ('judge', '0082_remove_profile_name'),
    ]

    operations = [
        migrations.AddField(
            model_name='submissiontestcase',
            name='extended_feedback',
            # blank=True: optional free text; existing rows get ''.
            field=models.TextField(blank=True, verbose_name='extended judging feedback'),
        ),
    ]
| Python | 0 | |
d410fb26d3fb8bbd843234e90891bee5a5fff7e7 | Add local dev settings module | halaqat/settings/local_settings.py | halaqat/settings/local_settings.py | from .base_settings import *
# Local development overrides on top of base_settings.
DEBUG = True

# Default UI language for local runs.
LANGUAGE_CODE = 'en'

# Accepted time display/input formats: 12-hour and 24-hour clock.
# NOTE(review): '%H:%M %p' mixes a 24-hour field with an AM/PM marker,
# and Django's TIME_FORMAT is normally a single string rather than a
# list — confirm both are intended.
TIME_FORMAT = [
    '%I:%M %p',
    '%H:%M %p',
]

TIME_INPUT_FORMATS = [
    '%I:%M %p',
    '%H:%M %p'
]
| Python | 0 | |
ce6c7a9e474c876829597861ce35b797b2509d42 | Add conftest.py for pytest | conftest.py | conftest.py | # This file must exist for pytest to add this directory to `sys.path`.
| Python | 0.000007 | |
cca26b50f02f098d3157501bd64e9f990fc061e2 | Create solution.py | leetcode/easy/valid_anagram/py/solution.py | leetcode/easy/valid_anagram/py/solution.py | #
# Anagram definition:
# https://en.wikipedia.org/wiki/Anagram
#
# Classic solution to the anagram problem.
# Sort both strings and check if they are equal.
#
class Solution(object):
    def isAnagram(self, s, t):
        """
        Return True when t is an anagram of s (same character multiset).

        :type s: str
        :type t: str
        :rtype: bool
        """
        # Different lengths can never be anagrams.
        if len(s) != len(t):
            return False
        # Count characters of s up, characters of t down; every counter
        # ends at zero exactly when the multisets match.
        balance = {}
        for ch in s:
            balance[ch] = balance.get(ch, 0) + 1
        for ch in t:
            balance[ch] = balance.get(ch, 0) - 1
        return all(count == 0 for count in balance.values())
| Python | 0.000018 | |
477a57b108499184acb4d74f7aa14b7a8e10f6d8 | Create naturalreaderspeech-test.py | home/CheekyMonkey/naturalreaderspeech-test.py | home/CheekyMonkey/naturalreaderspeech-test.py | # cycle through NaturalReaderSpeech voices
# with i2c connected jaw servo
# Author: Acapulco Rolf
# Date: October 4th 2017
# Build: myrobotlab development build version 2555
from time import sleep
from org.myrobotlab.service import Speech
lang="EN" #for NaturalReaderSpeech
Voice="Ryan"
voiceType = Voice
speech = Runtime.createAndStart("Speech", "NaturalReaderSpeech")
speech.setVoice(voiceType)
speech.setLanguage(lang)
frequency = 50 #50 Hz servo frequency
adaFruit16c1 = Runtime.createAndStart("AdaFruit16C1","Adafruit16CServoDriver")
raspi = Runtime.createAndStart("RasPi","RasPi")
adaFruit16c1.setController("RasPi","1","0x40")
adaFruit16c1.setPWMFreq(0,frequency)
jawPin = 8
jawServo = Runtime.createAndStart("jaw","Servo")
mouth = Runtime.createAndStart("Mouth","MouthControl")
sleep(20) #fix for servo attach timing issue as at myrobotlab 236x development builds
jawServo.attach(adaFruit16c1,jawPin,150,-1)
jaw = mouth.getJaw()
sleep(1)
jaw.attach(adaFruit16c1,jawPin)
jawServo.setMinMax(140,180) # set min and max jaw position accordingly for your own use-case
# these min/max settings work for me for this particular jaw: https://www.thingiverse.com/thing:992918
# @Mats, thanks :)
jawServo.setRest(175)
jawServo.moveTo(100)
jawServo.rest()
mouth.setmouth(140,175)
mouth.autoAttach = False
mouth.setMouth(speech)
def onEndSpeaking(text):
sleep(.5)
#Start of main script
sleep(1)
speech.speakBlocking(text)
mouth.jaw.moveTo(175)
def saystuff():
myvoices = ['Ryan','Rich','Mike','Graham','Laura','Charles','Crystal','Heather','Ella','Rod','Peter','Audrey','Lucy','Rachel','Rosy','Ryan']
myvoicescount = len(myvoices)
for i in range(0,myvoicescount):
speech.setVoice(myvoices[i])
onEndSpeaking ("I'm completely operational, and all my circuits are functioning perfectly.")
saystuff()
| Python | 0.000018 | |
2875ee60f30ca47a8dc957250125be505e5aee07 | Add build script | build.py | build.py | #!/usr/bin/env python3
# Copyright (c) 2014, Ruslan Baratov
# All rights reserved.
import argparse
import os
import re
import shutil
import subprocess
import sys
parser = argparse.ArgumentParser(description="Script for building")
parser.add_argument(
'--toolchain',
choices=[
'libcxx',
'xcode',
'clang_libstdcxx',
'gcc48',
'vs2013x64',
'vs2013'
],
help="CMake generator/toolchain",
)
parser.add_argument(
'--type',
required=True,
help="CMake build type",
)
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
toolchain = ''
generator = ''
tag = "{}-{}".format(args.toolchain, args.type)
if args.toolchain == 'libcxx':
toolchain = 'libcxx'
elif args.toolchain == 'xcode':
toolchain = 'xcode'
generator = '-GXcode'
tag = 'xcode'
elif args.toolchain == 'clang_libstdcxx':
toolchain = 'clang_libstdcxx'
elif args.toolchain == 'gcc48':
toolchain = 'gcc48'
elif args.toolchain == 'vs2013x64':
generator = '-GVisual Studio 12 2013 Win64'
tag = 'vs2013x64'
elif args.toolchain == 'vs2013':
generator = '-GVisual Studio 12 2013'
tag = 'vs2013'
else:
assert(False)
cdir = os.getcwd()
def call(args):
  """Echo *args*, run it as a subprocess, and exit(1) on any failure."""
  # Printing cannot raise the handled exceptions, so it sits outside try.
  print('Execute command: [')
  for arg in args:
    print('  `{}`'.format(arg))
  print(']')
  try:
    subprocess.check_call(
        args, stderr=subprocess.STDOUT, universal_newlines=True
    )
  except subprocess.CalledProcessError as error:
    # Non-zero exit from the child: report and abort.
    print(error)
    print(error.output)
    sys.exit(1)
  except FileNotFoundError as error:
    # Executable missing from PATH: report and abort.
    print(error)
    sys.exit(1)
call(['cmake', '--version'])
polly_root = os.getenv("POLLY_ROOT")
if not polly_root:
sys.exit("Environment variable `POLLY_ROOT` is empty")
toolchain_option = ''
if toolchain:
toolchain_path = os.path.join(polly_root, "{}.cmake".format(toolchain))
toolchain_option = "-DCMAKE_TOOLCHAIN_FILE={}".format(toolchain_path)
build_dir = os.path.join(cdir, '_builds', tag)
build_dir_option = "-B{}".format(build_dir)
build_type_for_generate_step = "-DCMAKE_BUILD_TYPE={}".format(args.type)
shutil.rmtree(build_dir, ignore_errors=True)
generate_command = [
'cmake',
'-H.',
build_dir_option,
build_type_for_generate_step
]
if generator:
generate_command.append(generator)
if toolchain_option:
generate_command.append(toolchain_option)
if args.verbose:
generate_command.append('-DCMAKE_VERBOSE_MAKEFILE=ON')
build_command = [
'cmake',
'--build',
build_dir,
'--config',
args.type
]
call(generate_command)
call(build_command)
| Python | 0.000001 | |
53dcffd4677987e6186182484e58fccde1e93d60 | change file name | h2o-py/test_hadoop/pyunit_hadoop.py | h2o-py/test_hadoop/pyunit_hadoop.py | import sys
sys.path.insert(1,"../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
import os
def test_hadoop():
    '''
    Test H2O read and write to hdfs
    '''
    # NOTE(review): Python 2 print statements below — this module is not
    # Python 3 compatible as written.
    # Namenode host comes from the environment so the same test can run
    # against different clusters.
    hdfs_name_node = os.getenv("NAME_NODE")
    h2o_data = h2o.import_file("hdfs://" + hdfs_name_node + "/datasets/100k.csv")
    print h2o_data.head()
    h2o_data.summary()

    # Fit an elastic-net binomial GLM: column 0 is the response, the
    # remaining columns are predictors.
    h2o_glm = H2OGeneralizedLinearEstimator(family="binomial", alpha=0.5, Lambda=0.01)
    h2o_glm.train(x=range(1, h2o_data.ncol), y=0, training_frame=h2o_data)

    # Round-trip the fitted model through HDFS to exercise save/load.
    hdfs_model_path = os.getenv("MODEL_PATH")
    h2o.save_model(h2o_glm, "hdfs://" + hdfs_model_path)
    new_model = h2o.load_model("hdfs://" + hdfs_model_path)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_hadoop)
else:
test_hadoop()
| Python | 0.000008 | |
8edf8bbd341c8b3e8395784667da5c577aba7ac6 | Add betting.py program | ibm-ponder-this/2015-05/betting.py | ibm-ponder-this/2015-05/betting.py |
from __future__ import print_function
import itertools
import collections
import sys
class BettingGame(object):
    """Breadth-first search over states of a doubling betting game.

    A state is a sorted tuple of the players' holdings.  From a state of
    all-distinct holdings there is one move per (lower, higher) pair: the
    lower player doubles and the higher player pays the difference.  Any
    state containing a repeated holding moves straight to the absorbing
    STOP_STATE.  ``do_all`` finds a state at maximum BFS distance from
    STOP_STATE in the reversed transition graph.
    """

    def __init__(self, max_value=256, num_players=3):
        self.max_value = max_value
        self.num_players = num_players
        # Absorbing end state: every player at zero.
        self.STOP_STATE = tuple(0 for _ in range(self.num_players))

    def do_all(self):
        """Build the full state graph and report the state farthest from STOP."""
        print('Creating states', file=sys.stderr)
        # BUG FIX: used xrange/itertools.imap, which do not exist on
        # Python 3 even though the file imports print_function for
        # forward compatibility; range/map behave identically here.
        states = set(map(self.makestate,
                         itertools.product(range(1, self.max_value + 1),
                                           repeat=self.num_players)))
        print('Done creating states', file=sys.stderr)

        reverse_edges = collections.defaultdict(set)
        for state in states:
            for target in self.transitions(state):
                reverse_edges[target].add(state)
        print('Done adding all transitions', file=sys.stderr)

        self.breadth_first(reverse_edges, self.STOP_STATE)

    def makestate(self, s):
        """Canonicalize holdings as a sorted tuple (player order is irrelevant)."""
        return tuple(sorted(s))

    def transitions(self, state):
        """Yield every state reachable from ``state`` in one move."""
        # A repeated holding ends the game immediately.
        if len(set(state)) < len(state):
            yield self.STOP_STATE
            return
        for hidx in range(self.num_players):
            for lidx in range(hidx):
                (lower, higher) = (state[lidx], state[hidx])
                # Lower player doubles; higher player pays the difference.
                yield self.makestate(
                    ((2 * lower) if (i == lidx)
                     else ((higher - lower) if (i == hidx) else s))
                    for (i, s) in enumerate(state))

    def breadth_first(self, edges, start):
        """BFS from ``start``; print the last node dequeued and its distance."""
        # worklist contains (element, distance_from_start)
        worklist = collections.deque()
        worklist.appendleft((start, 0))
        # already_seen contains elements
        already_seen = set([start])
        while worklist:
            (element, distance) = (last_seen, _) = worklist.pop()
            for neighbor in edges[element]:
                if neighbor in already_seen:
                    continue
                already_seen.add(neighbor)
                worklist.appendleft((neighbor, distance + 1))
        print('Last seen: {}'.format(last_seen))
        print('Distance: {}'.format(distance))
BettingGame(max_value=256).do_all()
| Python | 0.000001 | |
61822398dbd2a3819a15b8c33f1cd69ff2953b5a | Move animation.fill from BiblioPixelAnimation | bibliopixel/animation/fill.py | bibliopixel/animation/fill.py | from . animation import BaseAnimation
from .. util import colors
class Fill(BaseAnimation):
    """
    Fill the screen with a single color.
    """
    def __init__(self, *args, color='black', **kwds):
        super().__init__(*args, preclear=False, **kwds)
        # Pick the fill strategy once, based on the buffer type.
        is_numpy = hasattr(self.color_list, 'dtype')
        self._set_color = self._set_numpy if is_numpy else self._set_classic
        # BUG FIX: the ``color`` argument used to be accepted but never
        # applied, so ``self._color`` stayed unset and ``pre_run`` raised
        # AttributeError unless a caller assigned ``.color`` first.
        self.color = color

    def pre_run(self):
        # Re-apply the current color at the start of every run.
        self.color = self._color

    @property
    def color(self):
        return self._color

    @color.setter
    def color(self, color):
        # Normalize any accepted color spec, then paint the whole buffer.
        self._color = colors.make_color(color)
        self._set_color()

    def _set_numpy(self):
        # Broadcast-assign across the whole numpy buffer.
        self.color_list[:None] = self._color

    def _set_classic(self):
        self.color_list[:] = [self._color] * len(self.color_list)
| Python | 0 | |
eb49c28b790bbf6ce6042f657beaf328a9e6b33f | Add inline sources | arx/sources/inline.py | arx/sources/inline.py | from collections import Container, Mapping, OrderedDict, Sequence
import math
from sh import chmod, Command, mkdir, tar
from ..err import Err
from ..decorators import signature
from . import onepath, Source, twopaths
class Inline(Source):
    """Base class for sources whose payload is embedded in the spec itself."""

    @onepath
    def cache(self, cache):
        """Caching for inline sources is a no-op (the data is already local)."""
        pass
class InlineText(Inline):
    """A text payload carried inline; placed as a file or run as a script."""

    @signature(unicode)
    def __init__(self, text):
        self.text = text

    @twopaths
    def place(self, cache, path):
        """Write the text to ``path``, normalized to end in one newline."""
        mkdir('-p', path.dirname)
        with open(str(path), 'w') as h:
            h.write(self.text.strip() + '\n')

    @onepath
    def run(self, cache, args=[]):
        """Materialize the text as an executable file and run it with ``args``."""
        # NOTE(review): the mutable default ``args=[]`` is safe only
        # because args is never mutated here.
        f = cache.join('data')
        self.place(cache, f)
        chmod('a+rx', str(f))
        cmd = Command(str(f))
        cmd(*args)
class InlineBinary(Inline):
    """A raw bytes payload carried inline."""

    @signature(bytes)
    def __init__(self, data):
        self.data = data

    @twopaths
    def place(self, cache, path):
        """Write the bytes verbatim to ``path``."""
        mkdir('-p', path.dirname)
        # BUG FIX: open in binary mode — text mode newline-translates the
        # payload on some platforms (and rejects bytes outright on
        # Python 3).
        with open(str(path), 'wb') as h:
            h.write(self.data)

    @onepath
    def run(self, cache, args=[]):
        """Materialize the bytes as an executable file and run it with ``args``."""
        f = cache.join('data')
        self.place(cache, f)
        chmod('a+rx', str(f))
        cmd = Command(str(f))
        cmd(*args)
class InlineTarGZ(InlineBinary):
    """An inline gzipped tarball; placing it unpacks into a directory."""

    @onepath
    def run(self, cache, args=[]):
        """Tarballs are data, not programs — refuse to execute."""
        raise NoExecutingInlineTars()

    @twopaths
    def place(self, cache, path):
        """Unpack the tarball under the directory ``path``."""
        mkdir('-p', path)
        tar('-xz', '-C', str(path), _in=self.data)
        # BUG FIX: the original then reopened ``path`` — a directory it
        # just created and unpacked into — in text-write mode and dumped
        # the raw bytes over it, which can only fail (IsADirectoryError)
        # after a successful unpack.  That step is removed.
class InlineJar(InlineBinary):
    """An inline Java jar; executed via ``java -jar``."""

    @onepath
    def run(self, cache, args=[]):
        """Materialize the jar in the cache and run it with ``args``."""
        jar = cache.join('data.jar')
        self.place(cache, jar)
        cmd = Command('java')
        cmd('-jar', str(jar), *args)
class InlineCollection(Inline):
    """An inline tree of files: mappings become named entries, sequences
    become zero-padded numbered entries, and leaves become file contents."""

    @signature((Container, OrderedDict))
    def __init__(self, collection):
        self.collection = collection

    @twopaths
    def place(self, _cache, path):
        """Unpack the whole collection beneath ``path``."""
        InlineCollection.unpack_collection(path, self.collection)

    @onepath
    def run(self, _cache, _args=[]):
        raise NoExecutingCollections('Collections can not be executed.')

    @staticmethod
    @onepath
    def unpack_pairs(under, pairs):
        """Write each (relative path, data) pair beneath ``under``.

        Containers recurse; bytes are written verbatim; file-like objects
        are drained; anything else is written as stripped text plus a
        trailing newline.
        """
        for path, data in pairs:
            full = under.join(path)
            # BUG FIX: str/bytes leaves also satisfy Container and used
            # to recurse (a one-character string yields itself — infinite
            # recursion); exclude them so they are written as leaves.
            if isinstance(data, Container) and not isinstance(data, (bytes, str)):
                InlineCollection.unpack_collection(full, data)
            else:
                mkdir('-p', full.dirname)
                # TODO: rm -rf, check links, &c
                with open(str(full), 'w') as h:
                    # BUG FIX: the original wrote the builtin ``bytes``
                    # *type* instead of ``data`` and then fell through
                    # every branch, always appending str(data) as well.
                    # The branches are now exclusive and write the
                    # actual payload.
                    if isinstance(data, bytes):
                        h.write(data)
                    elif hasattr(data, 'read'):
                        h.write(data.read())
                    else:
                        h.write(str(data).strip() + '\n')

    @staticmethod
    def unpack_collection(under, collection):
        """Normalize ``collection`` to (name, data) pairs and unpack them."""
        pairs = None
        # BUG FIX: isinstance() arguments were reversed
        # (``isinstance(Mapping, collection)``), which raises TypeError at
        # runtime.
        if isinstance(collection, Mapping):
            pairs = collection.items()
        elif isinstance(collection, Sequence):
            # Zero-pad indices so lexicographic order matches list order.
            # BUG FIX: the original concatenated a str with the float
            # returned by math.ceil, which raises TypeError.
            width = max(1, int(math.ceil(math.log(max(len(collection), 2), 10))))
            pairs = (('{0:0{1}d}'.format(i, width), data)
                     for i, data in enumerate(collection))
        if pairs is None:
            raise UnhandledCollection('Collection type %s is not handled.',
                                      type(collection).__name__)
        InlineCollection.unpack_pairs(under, pairs)
class UnhandledCollection(Err):
    """Raised when an InlineCollection holds a type it cannot unpack."""
    pass
class NoExecutingCollections(Err):
    """Raised when a collection source is asked to run()."""
    pass
class NoExecutingInlineTars(Err):
    """Raised when an inline tarball source is asked to run()."""
    pass
| Python | 0.000001 | |
38651a6f690e39f5d5f64cdd389b031d653dcf95 | add migration for credit app status | src/wellsfargo/migrations/0028_auto_20190401_1213.py | src/wellsfargo/migrations/0028_auto_20190401_1213.py | # Generated by Django 2.2 on 2019-04-01 16:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wellsfargo', '0027_auto_20190208_1635'),
]
operations = [
migrations.AlterField(
model_name='cacreditapp',
name='status',
field=models.CharField(choices=[('', 'Unknown'), ('E0', 'Approved'), ('E1', 'Pending'), ('E2', 'Format Error'), ('E3', 'Wells Fargo Error'), ('E4', 'Denied')], default='', max_length=2, verbose_name='Application Status'),
),
migrations.AlterField(
model_name='cajointcreditapp',
name='status',
field=models.CharField(choices=[('', 'Unknown'), ('E0', 'Approved'), ('E1', 'Pending'), ('E2', 'Format Error'), ('E3', 'Wells Fargo Error'), ('E4', 'Denied')], default='', max_length=2, verbose_name='Application Status'),
),
migrations.AlterField(
model_name='uscreditapp',
name='status',
field=models.CharField(choices=[('', 'Unknown'), ('E0', 'Approved'), ('E1', 'Pending'), ('E2', 'Format Error'), ('E3', 'Wells Fargo Error'), ('E4', 'Denied')], default='', max_length=2, verbose_name='Application Status'),
),
migrations.AlterField(
model_name='usjointcreditapp',
name='status',
field=models.CharField(choices=[('', 'Unknown'), ('E0', 'Approved'), ('E1', 'Pending'), ('E2', 'Format Error'), ('E3', 'Wells Fargo Error'), ('E4', 'Denied')], default='', max_length=2, verbose_name='Application Status'),
),
]
| Python | 0 | |
0cfb4591a7754bcc08edddd17629006b5096d94d | Add handler for /sync API | synapse/handlers/sync.py | synapse/handlers/sync.py | import collections
SyncConfig = collections.namedtuple("SyncConfig", [
"user",
"device",
"since",
"limit",
"gap",
"sort"
"backfill"
"filter",
)
RoomSyncResult = collections.namedtuple("RoomSyncResult", [
"room_id",
"limited",
"published",
"prev_batch",
"events",
"state",
"event_map",
])
class SyncResult(collections.namedtuple("SyncResult", [
"next_batch", # Token for the next sync
"private_user_data", # List of private events for the user.
"public_user_data", # List of public events for all users.
"rooms", # RoomSyncResult for each room.
])):
__slots__ = []
def __nonzero__(self):
return self.private_user_data or self.public_user_data or self.rooms
class SyncHandler(BaseHandler):
def __init__(self, hs):
super(SyncHandler, self).__init__(hs)
self.event_sources = hs.get_event_sources()
def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0):
if timeout == 0:
return self.current_sync_for_user(sync_config, since)
else:
def current_sync_callback(since_token):
return self.current_sync_for_user(
self, since_token, sync_config
)
return self.notifier.wait_for_events(
sync_config.filter, since_token, current_sync_callback
)
defer.returnValue(result)
def current_sync_for_user(self, sync_config, since_token=None):
if since_token is None:
return self.inital_sync(sync_config)
else:
return self.incremental_sync(sync_config)
@defer.inlineCallbacks
def initial_sync(self, sync_config):
now_token = yield self.event_sources.get_current_token()
presence_stream = self.event_sources.sources["presence"]
# TODO (markjh): This looks wrong, shouldn't we be getting the presence
# UP to the present rather than after the present?
pagination_config = PaginationConfig(from_token=now_token)
presence, _ = yield presence_stream.get_pagination_rows(
user, pagination_config.get_source_config("presence"), None
)
room_list = yield self.store.get_rooms_for_user_where_membership_is(
user_id=user_id,
membership_list=[Membership.INVITE, Membership.JOIN]
)
# TODO (markjh): Does public mean "published"?
published_rooms = yield self.store.get_rooms(is_public=True)
published_room_ids = set(r["room_id"] for r in public_rooms)
for event in room_list:
messages, token = yield self.store.get_recent_events_for_room(
event.room_id,
limit=sync_config.limit,
end_token=now_token.room_key,
)
prev_batch_token = now_token.copy_and_replace("room_key", token[0])
current_state = yield self.state_handler.get_current_state(
event.room_id
)
rooms.append(RoomSyncResult(
room_id=event.room_id,
published=event.room_id in published_room_ids,
@defer.inlineCallbacks
def incremental_sync(self, sync_config):
| Python | 0.000001 | |
954b6d2152df52c330d59fe2b3b1cf65f5dd22cf | Create Str2Int_001.py | leetcode/008-String-to-Integer/Str2Int_001.py | leetcode/008-String-to-Integer/Str2Int_001.py | #@author: cchen
#Terrible code, and it will be updated and simplified later.
class Solution:
    """LeetCode 8 (String to Integer): parse a leading integer from a
    string, clamping to the signed 32-bit range."""

    INT_MAX = 2147483647
    INT_MIN = -2147483648

    # @param {string} str
    # @return {integer}
    def extractnum(self, ss):
        """Return the maximal digit-only prefix of ``ss``."""
        num = 0
        for ch in ss:
            if not ch.isdigit():
                break
            num += 1
        return ss[:num]

    def isoverflow(self, sss, ispos):
        """True if the digit string ``sss`` exceeds the 32-bit bound.

        BUG FIX: the original compared digit strings positionally and
        declared any string longer than 10 characters an overflow, so
        leading zeros (e.g. "00000000001") were misreported.  Compare
        numerically instead.
        """
        value = int(sss)
        return value > self.INT_MAX if ispos else value > -self.INT_MIN

    def myAtoi(self, str):
        """Parse ``str`` per the atoi contract: trim whitespace, accept one
        optional sign, read leading digits, clamp to [INT_MIN, INT_MAX];
        return 0 when no digits follow the optional sign."""
        str = str.strip()
        if len(str) == 0:
            return 0
        flag = True
        if str[0] == '+':
            str = str[1:]
        elif str[0] == '-':
            str = str[1:]
            flag = False
        # No digits after the (optional) sign: not a number.
        if len(str) == 0 or not str[0].isdigit():
            return 0
        n = self.extractnum(str)
        if flag:
            return self.INT_MAX if self.isoverflow(n, True) else int(n)
        return self.INT_MIN if self.isoverflow(n, False) else -int(n)
| Python | 0.000334 | |
af6fb23f87651d5cdce3730d2cf2f2b10b571837 | test script for ngram matrix creation | dsl/features/create_ngram_matrix.py | dsl/features/create_ngram_matrix.py | from sys import argv
from featurize import Tokenizer, Featurizer
def main():
    """Build an n-gram document matrix from files in a directory.

    Usage: ``script.py [N] <dir>`` — N is the n-gram order (default 3);
    <dir> is handed to Featurizer.featurize_in_directory.
    """
    N = int(argv[1]) if len(argv) > 1 else 3
    t = Tokenizer()
    f = Featurizer(t, N=N)
    # NOTE(review): argv[2] is read unconditionally, so running without a
    # directory argument raises IndexError — confirm that is acceptable
    # for this test script.
    docs = f.featurize_in_directory(argv[2])
    m = f.to_dok_matrix(docs)
    print m.shape
if __name__ == '__main__':
main()
| Python | 0 | |
15ff98ef08fd45354f0df4b4566c240ad84d1c31 | add ProductCategory model test | eca_catalogue/tests/models_tests.py | eca_catalogue/tests/models_tests.py | from django.test import TestCase
from eca_catalogue.tests.models import ProductCategory
class ProductCategoryTestCase(TestCase):
    """Smoke test: a ProductCategory root node can be created and saved."""

    def test_model(self):
        # NOTE(review): add_root suggests a django-treebeard MP_Node API —
        # confirm against the ProductCategory model definition.
        obj = ProductCategory.add_root(name="cat1", slug="cat1")
        # A truthy pk proves the row was persisted.
        self.assertTrue(obj.pk)
| Python | 0.000001 | |
ad74605039052c3dd7d343c84dd1ac24f068b34f | Bump version to 0.3.15 | coil/__init__.py | coil/__init__.py | # Copyright (c) 2005-2006 Itamar Shtull-Trauring.
# Copyright (c) 2008-2009 ITA Software, Inc.
# See LICENSE.txt for details.
"""Coil: A Configuration Library."""
__version_info__ = (0,3,15)
__version__ = ".".join([str(x) for x in __version_info__])
__all__ = ['struct', 'parser', 'tokenizer', 'errors']
from coil.parser import Parser
def parse_file(file_name, **kwargs):
    """Open and parse a coil file.

    See :class:`Parser <coil.parser.Parser>` for possible keyword arguments.

    :param file_name: Name of file to parse.
    :type file_name: str

    :return: The root object.
    :rtype: :class:`Struct <coil.struct.Struct>`
    """
    # BUG FIX: the handle was previously opened and never closed, leaking
    # the file descriptor until garbage collection.  root() returns the
    # fully parsed structure, so closing afterwards is safe.
    with open(file_name) as coil:
        return Parser(coil, file_name, **kwargs).root()
def parse(string, **kwargs):
    """Parse a coil string.

    See :class:`Parser <coil.parser.Parser>` for possible keyword arguments.

    :param string: String containing data to parse.
    :type string: str

    :return: The root object.
    :rtype: :class:`Struct <coil.struct.Struct>`
    """
    # The parser consumes an iterable of lines, like a file object.
    lines = string.splitlines()
    parser = Parser(lines, **kwargs)
    return parser.root()
| # Copyright (c) 2005-2006 Itamar Shtull-Trauring.
# Copyright (c) 2008-2009 ITA Software, Inc.
# See LICENSE.txt for details.
"""Coil: A Configuration Library."""
__version_info__ = (0,3,14)
__version__ = ".".join([str(x) for x in __version_info__])
__all__ = ['struct', 'parser', 'tokenizer', 'errors']
from coil.parser import Parser
def parse_file(file_name, **kwargs):
"""Open and parse a coil file.
See :class:`Parser <coil.parser.Parser>` for possible keyword arguments.
:param file_name: Name of file to parse.
:type file_name: str
:return: The root object.
:rtype: :class:`Struct <coil.struct.Struct>`
"""
coil = open(file_name)
return Parser(coil, file_name, **kwargs).root()
def parse(string, **kwargs):
"""Parse a coil string.
See :class:`Parser <coil.parser.Parser>` for possible keyword arguments.
:param file_name: String containing data to parse.
:type file_name: str
:return: The root object.
:rtype: :class:`Struct <coil.struct.Struct>`
"""
return Parser(string.splitlines(), **kwargs).root()
| Python | 0 |
54a0ea2024cbfb4924642b5c23c321a0ae083e9e | Add epgen.py | epgen/epgen.py | epgen/epgen.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
epgen runtime
~~~~~~~~~~~~~
:copyright: (c) 2016 by Jihoon Kang <kang@ghoon.net>
:license: Apache 2, see LICENSE for more details
'''
import os
import argparse
from cpgen import *
from confgen import *
from prgen import *
class EpgenRuntime:
    """Command-line driver that generates Eclipse project templates.

    Two mutually exclusive modes:
      * config-generation mode (``--config [TEMPLATE]``): write a config
        file from a named template via ``generate_config``;
      * project-generation mode (positional ``config_file``): read the
        config file and emit ``.classpath``, ``.project`` and the
        ``settings`` directory.
    """
    TMPL_DIR = './templates'
    target_dir = './output'
    config_output = './config'
    config_tmpl = 'default'
    # Installation root of the tool: the parent of the directory that
    # contains this file.
    install_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
    current_path = os.getcwd()
    config_file = None
    confgen_mode = False
    def start(self):
        """Entry point: parse arguments, then run the selected mode."""
        self.parse_args()
        if self.confgen_mode:
            self.generate_config()
        else:
            self.read_config()
            self.make_dirs()
            self.generate_classpath()
            self.generate_project()
            self.copy_rest_templates()
    def parse_args(self):
        """Parse the command line and set mode plus project attributes."""
        parser = argparse.ArgumentParser(description='generate eclipse project templates')
        group = parser.add_mutually_exclusive_group(required=True)
        group.add_argument('--config', nargs='?', help="generate config file with given CONFIG template")
        group.add_argument('config_file', nargs='?', help="generate project templates using the config file")
        parser.add_argument('--workroot', nargs='?', help="project root directory", default="<work-root>")
        parser.add_argument('--name', nargs='?', help="project name", default="<project-name>")
        parser.add_argument('--buildtarget', nargs='?', help="build target. alticast build system specific information", default="<build-target>")
        args = parser.parse_args()
        # BUG FIX: these attributes used to be assigned only in
        # project-generation mode, but generate_config() (config mode)
        # reads self.project_name etc. too, which raised AttributeError.
        # Assign them unconditionally; argparse defaults cover the rest.
        self.project_name = args.name
        self.project_rootdir = args.workroot
        self.build_target = args.buildtarget
        if not args.config_file:
            self.confgen_mode = True
            if args.config:
                self.config_tmpl = args.config
        else:
            self.config_file = args.config_file
    def generate_config(self):
        """Write a config file from the chosen template (config mode)."""
        tmpl_dir = '%s/configs' % self.install_path
        generate_config(self.project_name, self.project_rootdir,
                        self.build_target, self.config_tmpl, tmpl_dir)
    def read_config(self):
        """Load the config file; the project name becomes the output dir."""
        self.configs = read_config(self.config_file)
        self.target_dir = self.configs['name']
    def make_dirs(self):
        """Create the output directory tree."""
        os.system("mkdir -p %s/settings" % self.target_dir)
    def generate_classpath(self):
        """Emit the Eclipse .classpath file."""
        generate_classpath(self.configs, "%s/.classpath" % self.target_dir)
    def generate_project(self):
        """Emit the Eclipse .project file from its template."""
        tmpl_dir = '%s/templates' % self.install_path
        generate_project(self.configs, "%s/.project" % self.target_dir, tmpl_dir)
    def copy_rest_templates(self):
        """Copy the static settings templates into the output tree."""
        settings_dir = "%s/settings" % self.TMPL_DIR
        for root, dir, files in os.walk(settings_dir):
            for f in files:
                src_file = os.path.join(root, f)
                dst_dir = os.path.join('%s/settings/' % self.target_dir)
                os.system("cp -f %(src_file)s %(dst_dir)s" % locals())
# Script entry point: run the generator when executed directly.
if __name__ == '__main__':
    EpgenRuntime().start()
| Python | 0.000032 | |
3498ddd7817e72b3f6f0b851fa94e82047cb9129 | Create the config file if doesn't exist | chubby/config.py | chubby/config.py | import os
def create_if_not_exists():
    """
    Create the config file ~/.chubby if it doesn't exist already.
    """
    # BUG FIX: the original called os.path.expand(), which does not exist
    # (AttributeError) -- os.path.expanduser() is the correct function.
    # Build the absolute path once and use it directly instead of
    # chdir-ing into the home directory as a side effect.
    path = os.path.join(os.path.expanduser("~"), '.chubby')
    if not os.path.exists(path):
        # Opening in append mode creates an empty file without
        # truncating anything that appears concurrently.
        with open(path, 'a'):
            pass
| Python | 0.000002 | |
7a4d2139e34b234f596dea67a8fc8becf526a24e | convert unsatisfied file dependencies to trove dependencies (CNP-184) | policy/resolvefiledeps.py | policy/resolvefiledeps.py | #
# Copyright (c) 2009 rPath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.rpath.com/permanent/licenses/CPL-1.0.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
import itertools
import os
import re
from conary.build import policy
from conary.build import packagepolicy
from conary.deps import deps
from conary.local import database
class ResolveFileDependencies(policy.PackagePolicy):
    """
    NAME
    ====
    B{C{r.ResolveFileDependencies()}} - Change unresolved C{file:}
    dependencies to trove dependencies
    SYNOPSIS
    ========
    C{r.ResolveFileDependencies([I{exceptions=regexp}])}
    DESCRIPTION
    ===========
    The C{r.ResolveFileDependencies()} policy finds C{file:} requirements
    that are not resolved by C{file:} provides, and replaces them with
    appropriate C{trove:} requirements in the trove.  It does not
    modify the requirements on the individual file objects.
    The C{exceptions} keyword matches file dependencies not to modify.
    The C{r.ResolveFileDependencies()} policy looks up paths first in
    the local database (add items to C{buildRequires} in the recipe to
    ensure that packages are in the local database), and secondarily
    searches the C{installLabelPath}.
    EXAMPLES
    ========
    C{r.ResolveFileDependencies(exceptions='.*')}
    Do not convert any file requirement to a trove requirement.
    C{r.ResolveFileDependencies(exceptions='/usr/sbin/httpd')}
    Do not convert the C{/usr/sbin/http} file requirement to a trove
    requirement.
    """
    # RemoveSelfProvidedRequires must run first so that requirements
    # already satisfied by this package's own provides are gone before
    # we try to resolve what is left.
    requires = (
        ('RemoveSelfProvidedRequires', policy.REQUIRED_PRIOR),
    )
    def do(self):
        """Resolve leftover file: requirements for every built component."""
        self.cfg = self.recipe.cfg
        self.repos = self.recipe.getRepos()
        self.db = self.recipe._db
        for comp in self.recipe.autopkg.getComponents():
            req = comp.requires
            # NOTE(review): 'prov' is assigned but never used below.
            prov = comp.provides
            # get the deps that we want to resolve
            proposedFileDeps = set(req.iterDepsByClass(deps.FileDependencies))
            fileDeps = set()
            if self.exceptions:
                # Keep only the file deps matching none of the exception
                # regexps; recipe macros are interpolated into each pattern.
                reList = [re.compile(x % self.macros)
                          for x in self.exceptions]
                for f in proposedFileDeps:
                    for r in reList:
                        if r.match(str(f)):
                            break
                    # for/else: runs only when no exception pattern matched.
                    else:
                        fileDeps.add(f)
            else:
                fileDeps = proposedFileDeps
            if not fileDeps:
                continue
            # Both resolvers append to these lists and prune fileDeps
            # in place as they find matches.
            addedTroveDeps = []
            removedFileDeps = []
            self.resolveLocal(fileDeps, comp, addedTroveDeps, removedFileDeps)
            self.resolveRepo(fileDeps, comp, addedTroveDeps, removedFileDeps)
            # update the components deps
            if len(addedTroveDeps):
                req.addDeps(deps.TroveDependencies,addedTroveDeps)
                req.removeDeps(deps.FileDependencies,removedFileDeps)
    def resolveLocal(self, fileDeps, comp, addedTroveDeps, removedFileDeps):
        """Resolve file deps against troves in the local database.

        For each unresolved file dep whose path belongs to some installed
        trove that does NOT provide the file dep itself, substitute a
        trove: dep on the first trove owning the path.
        """
        if not fileDeps:
            return
        # NOTE(review): locDepSets and trvMap are unused in this method.
        locDepSets = set()
        trvMap = {}
        # Iterate over a copy so we can remove from fileDeps while looping.
        for fDep in fileDeps.copy():
            f = str(fDep)
            trv0 = None
            for trv in self.db.iterTrovesByPath(f):
                if not trv0:
                    trv0 = trv
                if trv.provides().satisfies(
                    self.toDepSet(fDep,deps.FileDependencies)):
                    break
            # for/else: no trove provides the file dep directly; fall back
            # to a trove: dep on the first trove that owns the path.
            else:
                if trv0:
                    trovName = trv0.getName()
                    addedTroveDeps.append(deps.Dependency(trovName))
                    removedFileDeps.append(fDep)
                    fileDeps.remove(fDep)
    def resolveRepo(self,fileDeps, comp, addedTroveDeps, removedFileDeps):
        """Resolve remaining file deps against the installLabelPath.

        Paths found in the repository but not resolvable as file deps
        (for a flavor compatible with the component) are converted to
        trove: deps on the owning trove.
        """
        if not fileDeps:
            return
        paths = [str(x) for x in fileDeps]
        # path -> first (name, version, flavor) tuple that owns the path.
        trvMap = {}
        repoDepSets = set()
        for label in self.cfg.installLabelPath:
            pathDict = self.repos.getTroveLeavesByPath(paths, label)
            for p in pathDict:
                if p not in trvMap and pathDict[p]:
                    trvMap[p] = pathDict[p][0]
                    repoDepSets.add(self.toDepSet(deps.Dependency(p),
                                                  deps.FileDependencies))
        if not repoDepSets:
            return
        # Collect the subset that the repository CAN resolve as file deps
        # with a flavor satisfying the component's flavor.
        resolvedDepSets = set()
        for label in self.cfg.installLabelPath:
            solMap = self.repos.resolveDependencies(
                label, repoDepSets, leavesOnly=True)
            for r in solMap:
                solList = solMap[r]
                for s in itertools.chain(*solList):
                    if s[2].satisfies(comp.flavor):
                        resolvedDepSets.add(r)
                        break
        # Whatever is left becomes a trove: dep on the owning trove.
        unresolvedDepSets = repoDepSets - resolvedDepSets
        for ds in unresolvedDepSets:
            # Each dep set holds exactly one dep; (class, dep) -> dep.
            fDep = list(ds.iterDeps())[0][1]
            f = str(fDep)
            nvf = trvMap[f]
            if nvf[2].satisfies(comp.flavor):
                trovName = nvf[0]
                addedTroveDeps.append(deps.Dependency(trovName))
                removedFileDeps.append(fDep)
                fileDeps.remove(fDep)
    def toDepSet(self, dep, depClass):
        """Wrap a single dependency in a DependencySet."""
        ds = deps.DependencySet()
        ds.addDep(depClass, dep)
        return ds
    def toDepSets(self, deps, depClass):
        """Wrap each dependency in its own single-entry DependencySet."""
        s = set()
        for d in deps:
            s.add(self.toDepSet(d, depClass))
        return s
| Python | 0 | |
96476a32e545184908f64aac41b23987255138e2 | Create new package. (#6623) | var/spack/repos/builtin/packages/py-htseq/package.py | var/spack/repos/builtin/packages/py-htseq/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyHtseq(PythonPackage):
    """HTSeq is a Python package that provides infrastructure to process
    data from high-throughput sequencing assays."""
    homepage = "http://htseq.readthedocs.io/en/release_0.9.1/overview.html"
    url      = "https://github.com/simon-anders/htseq/archive/release_0.9.1.tar.gz"
    version('0.9.1', '269e7de5d39fc31f609cccd4a4740e61')
    # Build-time tooling.
    depends_on('py-setuptools', type='build')
    # Runtime Python dependencies (also needed while building extensions).
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-pysam', type=('build', 'run'))
    depends_on('py-matplotlib', type=('build', 'run'))
    depends_on('py-cython', type=('build', 'run'))
    depends_on('swig', type=('build', 'run'))
| Python | 0 | |
5503e1f54298a5b6121e35794d43c6642b3af6e0 | Add lc0340_longest_substring_with_at_most_k_distinct_characters.py | lc0340_longest_substring_with_at_most_k_distinct_characters.py | lc0340_longest_substring_with_at_most_k_distinct_characters.py | """Leetcode 340. Longest Substring with At Most K Distinct Characters
Hard
URL: https://leetcode.com/problems/longest-substring-with-at-most-k-distinct-characters/
Given a string, find the length of the longest substring T that contains at most k
distinct characters.
Example 1:
Input: s = "eceba", k = 2
Output: 3
Explanation: T is "ece" which its length is 3.
Example 2:
Input: s = "aa", k = 1
Output: 2
Explanation: T is "aa" which its length is 2.
"""
class Solution(object):
    def lengthOfLongestSubstringKDistinct(self, s, k):
        """Length of the longest substring of s with at most k distinct chars.

        Sliding-window: extend the window to the right one character at a
        time; whenever more than k distinct characters are inside, shrink
        from the left.  O(n) time, O(k) extra space.

        :type s: str
        :type k: int
        :rtype: int
        """
        if k <= 0:
            return 0
        counts = {}   # char -> occurrences inside the current window
        left = 0
        best = 0
        for right, ch in enumerate(s):
            counts[ch] = counts.get(ch, 0) + 1
            # Shrink until the window holds at most k distinct characters.
            while len(counts) > k:
                left_ch = s[left]
                counts[left_ch] -= 1
                if not counts[left_ch]:
                    del counts[left_ch]
                left += 1
            best = max(best, right - left + 1)
        return best
def main():
    """Run the documented examples as a quick smoke test."""
    solver = Solution()
    print(solver.lengthOfLongestSubstringKDistinct("eceba", 2))  # expected: 3
    print(solver.lengthOfLongestSubstringKDistinct("aa", 1))     # expected: 2
# Script entry point.
if __name__ == '__main__':
    main()
| Python | 0.998744 | |
6da928b7e113e30af0da0aa5b18d48c9584a631d | add script | ditto.py | ditto.py | #!/usr/local/bin/python3
"""
The purpose of this script is to update dot files somewhere. It works in the
following way. Two locations are set
dothome : ($HOME)
absolute path to the set the dotfiles
dotarchive : ($HOME/.dotarchive)
absolute path to the dot files (usually some git archive)
Then symlinks are made from dothome to dotarchive. Simple as that.
"""
def main():
    """Parse the two required path arguments and echo them back."""
    import argparse
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("dothome",
                            help="absolute path to the dotfiles")
    arg_parser.add_argument("dotarchive",
                            help="absolute path to the dotfile archive")
    opts = arg_parser.parse_args()
    print(opts.dothome)
    print(opts.dotarchive)
if __name__ == "__main__":
main()
| Python | 0.000001 | |
db06d0c08fe4364314f17174e9685b44a95c874f | Use a "Session Factory" instead simply creating Session objects | demos/logging_proxy.py | demos/logging_proxy.py | #!/usr/bin/env python
#
# logging_proxy.py: Demonstrates how to inherit the Session class so that we can easily get notifications of I/O events .
# The idea - by overriding 6 simple function, we get all the I/O events we need in order to fully monitor
# the connection (new connnection,got-data, close-connection)
import tornado.ioloop
import maproxy.proxyserver
import maproxy.session
import string # for the "filter"
class LoggingSession(maproxy.session.Session):
    """
    Session subclass that logs every proxy I/O event.

    This class overrides the major "session" functions of the parent
    class: every time the proxy has done "something", we intercept the
    call, print the data, and delegate back to the parent.
    There are 6 functions that we would like to monitor:
    - new_connection        : New session
    - on_p2s_done_connect   : When the session is connected to the server
    - on_c2p_done_read      : C->S data
    - on_p2s_done_read      : C<-S data
    - on_c2p_close          : Client closes the connection
    - on_p2s_close          : Server closes the connection
    """
    # Class variable: counter for the number of connections
    running_counter=0
    def __init__(self,*args,**kwargs):
        """
        Currently overriding the "__init__" is not really required since the parent's
        __init__ is doing absolutely nothing, but it is a good practice for
        the future... (future updates)
        """
        super(LoggingSession,self).__init__(*args,**kwargs)
    #def new_connection(self,stream : tornado.iostream.IOStream ,address,proxy):
    def new_connection(self,stream ,address,proxy):
        """
        Override the maproxy.session.Session.new_connection() function.
        Called by the framework (proxyserver) for every new session.
        """
        # Increment the "autonumber".  Tornado's IOLoop is single-threaded,
        # so no lock is required.
        LoggingSession.running_counter+=1
        self.connid=LoggingSession.running_counter
        print("#%-3d: New Connection on %s" % (self.connid,address))
        super(LoggingSession,self).new_connection(stream,address,proxy)
    def on_p2s_done_connect(self):
        """
        Override the maproxy.session.Session.on_p2s_done_connect() function.
        Called by the framework (proxyserver) when the session is
        connected to the target server.
        """
        print("#%-3d: Server connected" % (self.connid))
        super(LoggingSession,self).on_p2s_done_connect()
    def on_c2p_done_read(self,data):
        """
        Override the maproxy.session.Session.on_c2p_done_read(data) function.
        Called by the framework (proxyserver) when we get data from the
        client (bound for the target server).
        """
        # First, let's call the parent-class function (on_cp2_done_read),
        # this will minimize network delay and complete the operation
        super(LoggingSession,self).on_c2p_done_read(data)
        # Print only the printable characters of the payload.
        # NOTE(review): under Python 3, filter() returns an iterator, so
        # this would print "<filter object ...>" instead of the data --
        # this demo appears to target Python 2; TODO confirm.
        print("#%-3d:C->S (%d bytes):\n%s" % (self.connid,len(data),filter(lambda x: x in string.printable, data)) )
    def on_p2s_done_read(self,data):
        """
        Override the maproxy.session.Session.on_p2s_done_read(data) function.
        Called by the framework (proxyserver) when we get data from the
        server (bound for the client).
        """
        # First, let's call the parent-class function (on_p2s_done_read),
        # this will minimize network delay and complete the operation
        super(LoggingSession,self).on_p2s_done_read(data)
        # Print only the printable characters of the payload.
        # NOTE(review): same Python 2 filter() caveat as on_c2p_done_read.
        print("#%-3d:C<-S (%d bytes):\n%s" % (self.connid,len(data),filter(lambda x: x in string.printable, data)) )
    def on_c2p_close(self):
        """
        Override the maproxy.session.Session.on_c2p_close() function.
        Called by the framework (proxyserver) when the client closes the
        connection.
        """
        print("#%-3d: C->S Closed" % (self.connid))
        super(LoggingSession,self).on_c2p_close()
    def on_p2s_close(self):
        """
        Override the maproxy.session.Session.on_p2s_close() function.
        Called by the framework (proxyserver) when the server closes the
        connection.
        """
        print("#%-3d: C<-S Closed" % (self.connid))
        super(LoggingSession,self).on_p2s_close()
class LoggingSessionFactory(maproxy.session.SessionFactory):
    """Factory handing out LoggingSession objects.

    The proxy server calls ``new`` whenever a client connects; by
    returning our LoggingSession subclass instead of the default
    maproxy.session.Session, every connection gets full I/O logging.
    """
    def __init__(self):
        super(LoggingSessionFactory, self).__init__()
    def new(self, *args, **kwargs):
        # Forward whatever arguments the proxy passes straight through.
        return LoggingSession(*args, **kwargs)
# HTTP->HTTP
# On your computer, browse to "http://127.0.0.1:81/" and you'll get http://www.google.com
# The only "special" argument is the "session_factory" that ponits to a new instance of LoggingSessionFactory.
# By using our special session-factory, the proxy will create the
# LoggingSession instances (instead of default Session instances)
# Wire everything together: proxy local port 81 to www.google.com:80,
# using our logging session factory so every connection is traced.
server = maproxy.proxyserver.ProxyServer("www.google.com",80,session_factory=LoggingSessionFactory())
server.listen(81)
print("http://127.0.0.1:81 -> http://www.google.com")
# Blocks forever, servicing proxy traffic on the Tornado IOLoop.
tornado.ioloop.IOLoop.instance().start()
| Python | 0 | |
0d0bf5b67f432fd4ee182b9026ea6e319babf9bd | Create ChamBus_create_database.py | ChamBus_create_database.py | ChamBus_create_database.py | # coding: utf-8
# https://github.com/ChamGeeks/GetAroundChamonix/blob/master/www/js/services/TripPlanner.js
import datetime, os, requests, sqlite3
db_filename = 'ChamBus.db'
db_url = 'https://chx-transit-db.herokuapp.com/api/export/sql'
# Refuse to clobber an existing database; the operator must remove it first.
if os.path.exists(db_filename):
    exit(db_filename + ' already exists. Rename or delete it and rerun this script.')
print('Initializing {}...'.format(db_filename))
start = datetime.datetime.now()
with sqlite3.connect(db_filename) as conn:
    # Download the SQL dump and execute it wholesale into the new database.
    print('Reading sql commands from: {} ...'.format(db_url))
    cursor = conn.executescript(requests.get(db_url).text)
    print('Database tables are:')
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
    print('\n'.join(sorted(x[0] for x in cursor.fetchall())))
    conn.commit()
print('Elapsed time: {}'.format(datetime.datetime.now() - start))
print('=====\nDone.')
| Python | 0.000003 | |
a7b31346835c8fdd1724432596650a6de137fe3f | test read_meta | test/Python/test_Func.py | test/Python/test_Func.py | import os, sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../bin')))
from file_def import read_meta
import unittest
class BasicTestSuite(unittest.TestCase):
    # Smoke test for file_def.read_meta against the bundled sample data.
    # NOTE(review): this only prints the result; it makes no assertion,
    # so it can never fail on bad output -- consider adding asserts.
    def test_read_meta(self):
        meta_file = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../data/SraRunTable.txt'))
        meta_dic=read_meta(meta_file, 0, 25)
        print(meta_dic)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
c8d48e9996f048b1844258ef427c4359645521c6 | Create solution.py | leetcode/easy/length_of_last_word/py/solution.py | leetcode/easy/length_of_last_word/py/solution.py | class Solution(object):
def lengthOfLastWord(self, s):
"""
:type s: str
:rtype: int
"""
words = s.split()
if len(words) > 0:
return len(words[-1])
return 0
| Python | 0.000018 | |
a84f965e16e68cb8973d6cc91fbacec56bb92a64 | add lottery.py | ext/lottery.py | ext/lottery.py | import decimal
import logging
import discord
from discord.ext import commands
from .common import Cog
log = logging.getLogger(__name__)
PERCENTAGE_PER_TAXBANK = (0.2 / 100)
TICKET_PRICE = 20
class Lottery(Cog):
    """Weekly lottery.
    The lottery works with you buying a 20JC lottery ticket.
    Every Saturday, a winner is chosen from the people
    who bought a ticket.
    The winner gets 0.2% of money from all taxbanks.
    """
    def __init__(self, bot):
        super().__init__(bot)
        # MongoDB collection holding one document per purchased ticket.
        self.ticket_coll = self.config.jose_db['lottery']
    @commands.group(aliases=['l'], invoke_without_command=True)
    async def lottery(self, ctx):
        """Show current lottery state.
        A read 'j!help Lottery' is highly recommended.
        """
        amount = decimal.Decimal(0)
        # BUG FIX: PERCENTAGE_PER_TAXBANK is a float, and mixing float
        # with Decimal in arithmetic raises TypeError.  Convert both
        # operands to Decimal (via str, which also accepts int/float
        # account amounts -- presumably numeric; TODO confirm the
        # stored type of account['amount']).
        rate = decimal.Decimal(str(PERCENTAGE_PER_TAXBANK))
        async for account in self.jcoin.all_accounts('taxbank'):
            amount += rate * decimal.Decimal(str(account['amount']))
        # BUG FIX: the old format spec '.2' meant 2 *significant digits*
        # (e.g. '4.2E+2'); '.2f' gives the intended 2 decimal places.
        await ctx.send('Next saturday you have a chance to win: '
                       f'`{amount:.2f}JC`')
    @lottery.command()
    async def users(self, ctx):
        """Show the users that are in the current lottery."""
        em = discord.Embed()
        users = []
        async for ticket in self.ticket_coll.find():
            users.append(f'<@{ticket["user_id"]}>')
        em.add_field(name='Users', value='\n'.join(users))
        await ctx.send(embed=em)
    @lottery.command()
    async def enter(self, ctx, amount: decimal.Decimal):
        """Enter the weekly lottery."""
        await ctx.send('not implemented yet')
        # Check if the user is in jose guild
        # Pay 20jc to jose
        # put user in ticket collection
        # send message to #lottery-log
# discord.py extension hook: register the Lottery cog with the bot.
def setup(bot):
    bot.add_cog(Lottery(bot))
| Python | 0.999723 | |
210429b1acbb099479c06f5bd4ceddfabfa6ee5c | Create qualysguard_remediation_ignore_non-running_kernels.py | qualysguard_remediation_ignore_non-running_kernels.py | qualysguard_remediation_ignore_non-running_kernels.py | #!/usr/bin/env python
| Python | 0.000002 | |
c92954f240ef990eae06967c12426367f0eb6319 | Add migration | readthedocs/donate/migrations/0003_add-impressions.py | readthedocs/donate/migrations/0003_add-impressions.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds the SupporterImpressions model: per-day counters of promo
    # offers/views/clicks, optionally linked to a SupporterPromo.
    dependencies = [
        ('donate', '0002_dollar-drop-choices'),
    ]
    operations = [
        migrations.CreateModel(
            name='SupporterImpressions',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date', models.DateField(verbose_name='Date')),
                ('offers', models.IntegerField(default=0, verbose_name='Offer')),
                ('views', models.IntegerField(default=0, verbose_name='View')),
                ('clicks', models.IntegerField(default=0, verbose_name='Clicks')),
                # Nullable so impressions can exist without a promo.
                ('promo', models.ForeignKey(related_name='impressions', blank=True, to='donate.SupporterPromo', null=True)),
            ],
        ),
    ]
| Python | 0.000002 | |
dc7cf288c5c5c9733a59184770fbaa26db036833 | Add basic tests for custom_urls system | tests/unit_project/test_core/test_custom_urls.py | tests/unit_project/test_core/test_custom_urls.py | # -*- coding: utf-8 -*-
from djangosanetesting import UnitTestCase
from django.http import Http404
from ella.core.custom_urls import DetailDispatcher
# dummy functions to register as views
def view(request, bits, context):
    """Dummy detail-extension view: echoes its arguments unchanged."""
    return (request, bits, context)
def custom_view(request, context):
    """Dummy custom detail view: echoes its arguments unchanged."""
    return (request, context)
class TestCustomUrlsDispatcher(UnitTestCase):
    """Unit tests for DetailDispatcher registration/lookup behavior."""
    def setUp(self):
        # Fresh dispatcher per test; the test instance doubles as the
        # "model"/"object" argument to exercise model-specific lookups.
        self.dispatcher = DetailDispatcher()
        self.context = {'object': self}
        self.request = object()
    def test_no_extension(self):
        # Unregistered extension names must raise Http404.
        self.assert_raises(Http404, self.dispatcher._get_view, 'start', self)
    def test_register_global_extension(self):
        self.dispatcher.register('start', view)
        self.assert_equals(view, self.dispatcher._get_view('start', self))
    def test_register_extension_for_model(self):
        self.dispatcher.register('another_start', view, model=self.__class__)
        self.assert_equals(view, self.dispatcher._get_view('another_start', self.__class__))
    def test_register_extension_for_model_not_work_for_other_models(self):
        # A model-scoped registration must not leak to other models.
        self.dispatcher.register('start', view, model=self.__class__)
        self.assert_raises(Http404, self.dispatcher._get_view, 'start', object())
    def test_no_custom_view(self):
        self.assert_raises(Http404, self.dispatcher._get_custom_detail_view, self.__class__)
    def test_register_custom_view(self):
        self.dispatcher.register_custom_detail(self.__class__, custom_view)
        self.assert_equals(custom_view, self.dispatcher._get_custom_detail_view(self.__class__))
| Python | 0 | |
861120c5ba7e6e126cac13497a489bc035d27026 | add partition show | bin/partition_show.py | bin/partition_show.py | #!/usr/bin/python
import datetime
import MySQLdb
import json
import os
CONFIG_FILE="partition.json"
# -----------------------------------
def config_read(filename):
    """Load a JSON config file and return the parsed object."""
    # Use a context manager so the file handle is closed promptly;
    # the old bare open() leaked it until garbage collection.
    with open(filename) as fp:
        return json.load(fp)
# -----------------------------------
def date_show_all_partitions(conn, tablename):
    """Return the partition names of *tablename*, newest first.

    Queries information_schema.PARTITIONS through the supplied DB-API
    connection and returns a list of partition-name strings.
    """
    # NOTE: tablename is interpolated directly into the SQL; callers
    # must only pass trusted table names (same behavior as before).
    query = ("SELECT PARTITION_NAME FROM information_schema.PARTITIONS"
             " WHERE TABLE_NAME='" + tablename +
             "' ORDER BY PARTITION_NAME desc;")
    cursor = conn.cursor()
    cursor.execute(query)
    names = [row[0] for row in cursor.fetchall()]
    cursor.close()
    return names
# Print "table:partition" for every partition except the catch-all
# "pmax" partition.  (Python 2 print statement below.)
def partition_exec(conn, table):
    lists = date_show_all_partitions(conn, table)
    for v in lists:
        if v == "pmax":
            continue
        print table + ":" + v
def main():
    # Load ../config/partition.json relative to this script, connect to
    # MySQL with the credentials it contains, and list the partitions
    # of every configured table.
    path = os.path.join(os.path.join(os.path.dirname(__file__), ".."), "config");
    conf = config_read(os.path.join(path, CONFIG_FILE))
    myconf = conf["MYSQL"]
    conn = MySQLdb.connect(host=myconf["HOST"], db=myconf["DB"], user=myconf["USER"], passwd=myconf["PASS"])
    for table in conf["TABLES"]:
        partition_exec(conn, table)
    conn.close()
main()
| Python | 0 | |
480d29bfc92b8dcee3fc02a05e5588085f1bd3bc | new tool: tools/skqp/find_commit_with_best_gold_results | tools/skqp/find_commit_with_best_gold_results.py | tools/skqp/find_commit_with_best_gold_results.py | #! /usr/bin/env python
# Copyright 2019 Google LLC.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import re
import subprocess
import sys
import threading
import urllib
import urllib2
assert '/' in [os.sep, os.altsep]
skia_directory = os.path.abspath(os.path.dirname(__file__) + '/../..')
def get_jobs():
    """Yield one [(key, value), ...] list per Test- job in jobs.json.

    The job name is decomposed with a regex; the optional extra_config
    group becomes '' when absent.  Non-matching jobs are skipped.
    """
    path = skia_directory + '/infra/bots/jobs.json'
    reg = re.compile('Test-(?P<os>[A-Za-z0-9_]+)-'
                     '(?P<compiler>[A-Za-z0-9_]+)-'
                     '(?P<model>[A-Za-z0-9_]+)-GPU-'
                     '(?P<cpu_or_gpu_value>[A-Za-z0-9_]+)-'
                     '(?P<arch>[A-Za-z0-9_]+)-'
                     '(?P<configuration>[A-Za-z0-9_]+)-'
                     'All(-(?P<extra_config>[A-Za-z0-9_]+)|)')
    keys = ['os', 'compiler', 'model', 'cpu_or_gpu_value', 'arch',
            'configuration', 'extra_config']
    # Python 2: encode unicode group values to utf-8 str; map None -> ''.
    def fmt(s):
        return s.encode('utf-8') if s is not None else ''
    with open(path) as f:
        jobs = json.load(f)
    for job in jobs:
        m = reg.match(job)
        if m is not None:
            yield [(k, fmt(m.group(k))) for k in keys]
def gold_export_url(job, config, first_commit, last_commit):
    """Build a public-gold /json/export URL for one job+config.

    Filters to positive, triaged, head digests between the two commits.
    (Python 2: urllib.urlencode.)
    """
    qq = [('source_type', 'gm'), ('config', config)] + job
    query = [
        ('fbegin', first_commit),
        ('fend', last_commit),
        ('query', urllib.urlencode(qq)),
        ('pos', 'true'),
        ('neg', 'false'),
        ('unt', 'false'),
        ('head', 'true')
    ]
    return 'https://public-gold.skia.org/json/export?' + urllib.urlencode(query)
def get_results_for_commit(commit, jobs):
    """Return the total count of passing Gold tests for one commit.

    Fans out one HTTP request per (job, config) on its own thread and
    sums the number of tests that have at least one digest.
    """
    sys.stderr.write('%s\n' % commit)
    CONFIGS = ['gles', 'vk']
    # Appended to concurrently by the worker threads; list.append is
    # atomic under the GIL, so no explicit lock is used.
    passing_tests_for_all_jobs = []
    def process(url):
        testResults = json.load(urllib2.urlopen(url))
        sys.stderr.write('.')  # progress tick per completed request
        passing_tests = 0
        for t in testResults:
            assert t['digests']
            passing_tests += 1
        passing_tests_for_all_jobs.append(passing_tests)
    all_urls = [gold_export_url(job, config, commit, commit)
                for job in jobs for config in CONFIGS]
    threads = [threading.Thread(target=process, args=(url,)) for url in all_urls]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    result = sum(passing_tests_for_all_jobs)
    sys.stderr.write('\n%d\n' % result)
    return result
def find_best_commit(commits):
    """Return the hash of the commit with the most passing Gold tests.

    Resolves each commit name via git rev-parse, scores it with
    get_results_for_commit, and returns the first hash achieving the
    maximum score (or None for an empty input).
    """
    jobs = [j for j in get_jobs()]
    results = []
    for commit_name in commits:
        commit_hash = subprocess.check_output(['git', 'rev-parse', commit_name]).strip()
        results.append((commit_hash, get_results_for_commit(commit_hash, jobs)))
    best_result = max(r for h, r in results)
    for h, r in results:
        if r == best_result:
            return h
    return None
def generate_commit_list(commit_count, starting_commit):
    """Yield ancestor refs starting_commit~0 .. ~(commit_count-1)."""
    return (starting_commit + '~%d' % idx for idx in range(commit_count))
if __name__ == '__main__':
    # NOTE: Python 2 script (urllib2 above, print statement below).
    # Fetch the latest refs, then score the last 65 commits on master.
    os.chdir(skia_directory)
    subprocess.check_call(['git', 'fetch', 'origin'])
    print find_best_commit(generate_commit_list(65, 'origin/master'))
| Python | 0.999916 | |
c50a7189e730fc3e95eb209eed00ebdcd7001bde | Create ImgurStorage.py | ImgurStorage.py | ImgurStorage.py | import base64
import os
import tempfile
from django.core.exceptions import SuspiciousFileOperation
from django.core.files import File
from django.utils._os import safe_join
import requests
from django.core.files.storage import Storage
from imgurpython import ImgurClient
class ImgurStorage(Storage):
    """
    Uses the Imgur cloud service to store images.
    Great for Heroku
    This is just a gist, needs some work.
    """
    # NOTE(review): credentials are hardcoded placeholders -- they must
    # come from settings/environment, never from source control.
    client_id = "LOL"
    client_secret = "LOL"
    access_token = "LOL"
    refresh_token = "LOL"
    def upload(self, path):
        # Convenience wrapper around the Imgur client upload.
        return self.client.upload_from_path(path)
    def __init__(self):
        super(ImgurStorage, self).__init__()
        self.client = ImgurClient(self.client_id, self.client_secret, self.access_token, self.refresh_token)
    def _open(self, name, mode='rb'):
        # Download http://i.imgur.com/<name>.png into a temp file and
        # wrap it in a django File.
        # NOTE(review): delete=False means the temp file is never
        # removed from disk; callers/cleanup should handle it.
        file_url = "http://i.imgur.com/{0}.png".format(name)
        r = requests.get(file_url)
        f = tempfile.NamedTemporaryFile(delete=False)
        for chunk in r.iter_content(chunk_size=512 * 1024):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
        f.close()
        return File(f)
    def uploaded_path(self, name):
        # Resolve name under self.location, refusing path traversal.
        # NOTE(review): self.location is never set in this class --
        # presumably provided by configuration; verify before use.
        try:
            path = safe_join(self.location, name)
        except ValueError:
            raise SuspiciousFileOperation("Attempted access to '%s' denied." % name)
        return os.path.normpath(path)
    def get_available_name(self, name):
        # No collision handling: always reuse the requested name.
        return name
    def _save(self, name, content):
        """
        Saves new content to the file specified by name. The content should be
        a proper File object or any python file-like object, ready to be read
        from the beginning.
        """
        # Get the proper name for the file, as it will actually be saved.
        if name is None:
            name = content.name
        if not hasattr(content, 'chunks'):
            content = File(content)
        content.open()
        # Imgur expects the image payload base64-encoded.
        data = {
            'image': base64.b64encode(content.read()),
            'type': 'base64',
            'meta': {}
        }
        ret = self.client.make_request('POST', 'upload', data, True)
        content.close()
        # The Imgur image id becomes the stored "name".
        return ret["id"]
    def url(self, name):
        return "http://i.imgur.com/{0}.png".format(name)
    def get_valid_name(self, name):
        return name
    def exists(self, name):
        # NOTE(review): always claims the name exists; combined with
        # get_available_name this effectively disables Django's
        # duplicate-name logic.  TODO: query Imgur instead.
        return True
| Python | 0.000001 | |
e5347530923208abec844e3c41ae3f6680ec42e5 | Add Ironic module | lib/ansible/modules/cloud/openstack/os_ironic.py | lib/ansible/modules/cloud/openstack/os_ironic.py | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2014, Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
# TODO FIX UUID/Add node support
DOCUMENTATION = '''
---
module: os_ironic
short_description: Create/Delete Bare Metal Resources from OpenStack
version_added: "1.10"
extends_documentation_fragment: openstack
description:
- Create or Remove Ironic nodes from OpenStack.
options:
state:
description:
- Indicates desired state of the resource
choices: ['present', 'absent']
default: present
uuid:
description:
- globally unique identifier (UUID) to be given to the resource. Will
be auto-generated if not specified.
required: false
default: None
driver:
description:
- The name of the Ironic Driver to use with this node.
required: true
default: None
ironic_url:
description:
- If noauth mode is utilized, this is required to be set to the
endpoint URL for the Ironic API. Use with "auth" and "auth_plugin"
settings set to None.
required: false
default: None
driver_info:
description:
- Information for this server's driver. Will vary based on which
driver is in use. Any sub-field which is populated will be validated
during creation.
power:
- Information necessary to turn this server on / off. This often
includes such things as IPMI username, password, and IP address.
required: true
deploy:
- Information necessary to deploy this server directly, without
using Nova. THIS IS NOT RECOMMENDED.
console:
- Information necessary to connect to this server's serial console.
Not all drivers support this.
management:
- Information necessary to interact with this server's management
interface. May be shared by power_info in some cases.
required: true
nics:
description:
- A list of network interface cards, eg, " - mac: aa:bb:cc:aa:bb:cc"
required: true
properties:
description:
- Definition of the physical characteristics of this server, used for
scheduling purposes
cpu_arch:
description:
- CPU architecture (x86_64, i686, ...)
default: x86_64
cpus:
description:
- Number of CPU cores this machine has
default: 1
ram:
description:
- amount of RAM this machine has, in MB
default: 1
disk_size:
description:
- size of first storage device in this machine (typically
/dev/sda), in GB
default: 1
requirements: ["shade"]
'''
EXAMPLES = '''
# Enroll a node with some basic properties and driver info
- os_ironic:
cloud: "devstack"
driver: "pxe_ipmitool"
uuid: "a8cb6624-0d9f-4882-affc-046ebb96ec92"
properties:
cpus: 2
cpu_arch: "x86_64"
ram: 8192
disk_size: 64
nics:
- mac: "aa:bb:cc:aa:bb:cc"
- mac: "dd:ee:ff:dd:ee:ff"
driver_info:
power:
ipmi_address: "1.2.3.4"
ipmi_username: "admin"
ipmi_password: "adminpass"
'''
def _parse_properties(module):
p = module.params['properties']
props = dict(
cpu_arch=p.get('cpu_arch') if p.get('cpu_arch') else 'x86_64',
cpus=p.get('cpus') if p.get('cpus') else 1,
memory_mb=p.get('ram') if p.get('ram') else 1,
local_gb=p.get('disk_size') if p.get('disk_size') else 1,
)
return props
def _parse_driver_info(module):
    """Merge the driver_info sections from module params into one dict.

    The mandatory ``power`` section is the base; any of ``console``,
    ``management`` and ``deploy`` that are present are folded in on top,
    in that order.
    """
    driver_info = module.params['driver_info']
    merged = driver_info.get('power')
    if not merged:
        raise shade.OpenStackCloudException(
            "driver_info['power'] is required")
    for section in ('console', 'management', 'deploy'):
        extra = driver_info.get(section)
        if extra:
            merged.update(extra)
    return merged
def main():
    """Ansible entry point: enroll (state=present) or remove
    (state=absent) a bare-metal node in Ironic via shade.

    Exits through module.exit_json / module.fail_json in every path.
    """
    argument_spec = openstack_full_argument_spec(
        uuid=dict(required=False),
        driver=dict(required=True),
        driver_info=dict(type='dict', required=True),
        nics=dict(type='list', required=True),
        properties=dict(type='dict', default={}),
        ironic_url=dict(required=False),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    # noauth mode: an explicit Ironic endpoint URL must be supplied.
    if (module.params['auth_plugin'] == 'None' and
            module.params['ironic_url'] is None):
        module.fail_json(msg="Authentication appears disabled, Please "
                             "define an ironic_url parameter")
    # Route the bare endpoint through the 'auth' dict that shade expects.
    if module.params['ironic_url'] and module.params['auth_plugin'] == 'None':
        module.params['auth'] = dict(endpoint=module.params['ironic_url'])
    try:
        cloud = shade.operator_cloud(**module.params)
        # None when the node is not yet registered with Ironic.
        server = cloud.get_machine_by_uuid(module.params['uuid'])
        if module.params['state'] == 'present':
            properties = _parse_properties(module)
            driver_info = _parse_driver_info(module)
            kwargs = dict(
                uuid=module.params['uuid'],
                driver=module.params['driver'],
                properties=properties,
                driver_info=driver_info,
            )
            if server is None:
                server = cloud.register_machine(module.params['nics'],
                                                **kwargs)
                module.exit_json(changed=True, uuid=server.uuid)
            else:
                # TODO: compare properties here and update if necessary
                # ... but the interface for that is terrible!
                module.exit_json(changed=False,
                                 result="Server already present")
        if module.params['state'] == 'absent':
            if server is not None:
                cloud.unregister_machine(module.params['nics'],
                                         module.params['uuid'])
                module.exit_json(changed=True, result="deleted")
            else:
                module.exit_json(changed=False, result="Server not found")
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
| Python | 0 | |
c153bc9422308599d1354abf782273ca7bd78952 | Add a few unit tests for libvirt_conn. | nova/tests/virt_unittest.py | nova/tests/virt_unittest.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
    """Checks LibvirtConnection's URI/template selection and the
    generated domain XML for each supported libvirt_type."""
    def test_get_uri_and_template(self):
        """Each libvirt_type must yield its expected URI and domain XML;
        an explicit FLAGS.libvirt_uri must then override the default."""
        class MockDataModel(object):
            # Minimal stand-in for an instance datamodel record.
            def __init__(self):
                self.datamodel = { 'name' : 'i-cafebabe',
                                   'memory_kb' : '1024000',
                                   'basepath' : '/some/path',
                                   'bridge_name' : 'br100',
                                   'mac_address' : '02:12:34:46:56:67',
                                   'vcpus' : 2 }
        # Maps libvirt_type -> (expected URI, predicates over the XML).
        type_uri_map = { 'qemu' : ('qemu:///system',
                             [lambda s: '<domain type=\'qemu\'>' in s,
                              lambda s: 'type>hvm</type' in s,
                              lambda s: 'emulator>/usr/bin/kvm' not in s]),
                         'kvm' : ('qemu:///system',
                             [lambda s: '<domain type=\'kvm\'>' in s,
                              lambda s: 'type>hvm</type' in s,
                              lambda s: 'emulator>/usr/bin/qemu<' not in s]),
                         'uml' : ('uml:///system',
                             [lambda s: '<domain type=\'uml\'>' in s,
                              lambda s: 'type>uml</type' in s]),
                          }
        for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)
            uri, template = conn.get_uri_and_template()
            self.assertEquals(uri, expected_uri)
            for i, check in enumerate(checks):
                xml = conn.toXml(MockDataModel())
                self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
        # Deliberately not just assigning this string to FLAGS.libvirt_uri and
        # checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the FLAGS.
        testuri = 'something completely different'
        FLAGS.libvirt_uri = testuri
        for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)
            uri, template = conn.get_uri_and_template()
            self.assertEquals(uri, testuri)
| Python | 0 | |
07500dbd92aa15540ddf77b96a7072c5f66d34b2 | Add files via upload | heat_map.py | heat_map.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 21 17:27:18 2017
@author: DWyatt
"""
import pandas as pd
import seaborn as sns
import sys
# Plot a correlation heat map of the numeric columns in train.csv
# (presumably a Kaggle house-prices style dataset -- it has a
# SalePrice column; confirm against the actual data source).
df_train = pd.read_csv('train.csv')
target = 'SalePrice'
# Every column except the prediction target.
variables = [column for column in df_train.columns if column!=target]
corr = df_train.corr()
sns_heat= sns.heatmap(corr, square=True)
fig = sns_heat.get_figure()
fig.savefig('heat.png')
print([target])
print(variables)
#sys.exit()
#sns_pair = sns.pairplot(df_train,
#x_vars=['SalePrice'],
#y_vars=['LotFrontage', 'Neighborhood'])
#fig = sns_pair.get_figure()
#fig.savefig('pair.png')
e755977ee0ada391149e55d3331bf2ffe045d243 | Add a build configuration test for zlib, for #187 | examples/tests/test_build_config.py | examples/tests/test_build_config.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
import pycurl
import zlib
# BytesIO fallback chain for Python 3 / Python 2 (cStringIO / StringIO).
try:
    from io import BytesIO
except ImportError:
    try:
        from cStringIO import StringIO as BytesIO
    except ImportError:
        from StringIO import StringIO as BytesIO
# Phase 1: request deflate via a raw header (ENCODING unset) so curl
# does NOT decode -- the body should arrive still compressed.
c = pycurl.Curl()
c.setopt(c.URL, 'http://pycurl.sourceforge.net')
#c.setopt(c.ENCODING, 'deflate')
c.setopt(c.HTTPHEADER, ['Accept-Encoding: deflate'])
body = BytesIO()
c.setopt(c.WRITEFUNCTION, body.write)
encoding_found = False
def header_function(header):
    # Flags whether the server declared a deflate Content-Encoding.
    global encoding_found
    if header.decode('iso-8859-1').lower().startswith('content-encoding: deflate'):
        encoding_found = True
c.setopt(c.HEADERFUNCTION, header_function)
c.perform()
assert encoding_found
print('Server supports deflate encoding')
encoded = body.getvalue()
# should not raise exceptions (raw deflate stream, hence -MAX_WBITS)
zlib.decompress(encoded, -zlib.MAX_WBITS)
print('Server served deflated body')
# Phase 2: set ENCODING so libcurl itself negotiates and inflates.
c.reset()
c.setopt(c.URL, 'http://pycurl.sourceforge.net')
c.setopt(c.ENCODING, 'deflate')
body = BytesIO()
c.setopt(c.WRITEFUNCTION, body.write)
encoding_found = False
def header_function(header):
    global encoding_found
    if header.decode('iso-8859-1').lower().startswith('content-encoding: deflate'):
        encoding_found = True
c.setopt(c.HEADERFUNCTION, header_function)
c.perform()
assert encoding_found
print('Server claimed deflate encoding as expected')
# body should be decoded
encoded = body.getvalue()
if '<html' in encoded.decode('iso-8859-1').lower():
    print('Curl inflated served body')
else:
    # Body is not readable HTML: if it still inflates, curl did not
    # decode it -- either way this build lacks working zlib support.
    fail = False
    try:
        zlib.decompress(encoded, -zlib.MAX_WBITS)
        print('Curl did not inflate served body')
        fail = True
    except:
        print('Weird')
        fail = True
    if fail:
        assert False
c.close()
| Python | 0 | |
9f1c5612c717bac3690d093a27a0a362ff4793b4 | add parameters class for fitting data | nsls2/fitting/parameters.py | nsls2/fitting/parameters.py | # Copyright (c) Brookhaven National Lab 2O14
# All rights reserved
# BSD License
# See LICENSE for full text
# @author: Li Li (lili@bnl.gov)
# created on 07/20/2014
class ParameterBase(object):
    """Container for a single fitting parameter: value plus bounds."""
    def __init__(self):
        # Current value and the allowed [min, max] fitting range;
        # all unset (None) until assigned.
        self.val = None
        self.min = None
        self.max = None


class Parameters(object):
    """Dict-like collection of named ParameterBase entries."""
    def __init__(self):
        self.p_dict = {}

    def add(self, **kwgs):
        """Register a parameter from keywords name, val, min, max.

        ``name`` is mandatory; the others are optional and default to
        None. Without a name, usage help is printed and nothing is added.
        """
        # `in` replaces the deprecated/py2-only dict.has_key();
        # kwgs.get() defaulting to None matches ParameterBase's init.
        if 'name' in kwgs:
            param = ParameterBase()
            param.val = kwgs.get('val')
            param.min = kwgs.get('min')
            param.max = kwgs.get('max')
            self.p_dict[kwgs['name']] = param
        else:
            # Parenthesized single-argument prints behave identically
            # under Python 2 print statements and Python 3 print().
            print("please define parameter name first.")
            print("please define parameters as %s, %s, %s, %s"
                  % ('name', 'val', 'min', 'max'))

    def __getitem__(self, name):
        """Look up a parameter by name (raises KeyError if absent)."""
        return self.p_dict[name]

    def all(self):
        """Return the underlying name -> ParameterBase mapping."""
        return self.p_dict
17de6f90ce081984cab528526fcf9d9e7008be14 | Create beta_scraping_get_users_honor.py | Solutions/beta/beta_scraping_get_users_honor.py | Solutions/beta/beta_scraping_get_users_honor.py | from bs4 import BeautifulSoup as BS
from urllib.request import urlopen
Url = 'https://www.codewars.com/users/leaderboard'
def get_honor(username):
    """Scrape the codewars leaderboard page for *username*'s honor.

    Returns the honor as an int, or the string "Username not found!"
    when the user is not on the leaderboard page.
    """
    html = urlopen(Url).read().decode('utf-8')
    soup = BS(html, 'html.parser')
    for row in soup.find_all('tr'):
        # Positional parse of the serialized row: first cell holds the
        # username, second-to-last cell holds the honor number.
        try:
            cells = str(row).split('</td>')
            user = cells[0][19:(cells[0].find('>') - 1)]
            if user == username:
                return int(cells[-2][4:])
        except (IndexError, ValueError):
            # Header/malformed row that does not fit the layout above;
            # was a bare `except:` which hid real bugs -- keep scanning.
            continue
    return "Username not found!"
| Python | 0.000001 | |
8c737c22ae5d896f5445995660d664d959ce1c08 | add ctc reader | fluid/ocr_recognition/ctc_reader.py | fluid/ocr_recognition/ctc_reader.py | import os
import cv2
import numpy as np
from paddle.v2.image import load_image
class DataGenerator(object):
    """Builds paddle-style reader callables over an OCR dataset whose
    label file lines are "<h> <w> <image_name> <comma-sep label ids>"."""
    def __init__(self):
        pass
    def train_reader(self, img_root_dir, img_label_list):
        '''
        Reader interface for training.

        :param img_root_dir: The root path of the image for training.
        :type file_list: str

        :param img_label_list: The path of the <image_name, label> file for training.
        :type file_list: str
        '''
        # sort by height, e.g. idx
        img_label_lines = []
        for line in open(img_label_list):
            # h, w, img_name, labels
            items = line.split(' ')
            # Zero-pad the leading field so a lexicographic sort of the
            # prefixed lines orders them numerically.
            idx = "{:0>5d}".format(int(items[0]))
            img_label_lines.append(idx + ' ' + line)
        img_label_lines.sort()
        def reader():
            for line in img_label_lines:
                # h, w, img_name, labels (after dropping the sort key)
                items = line.split(' ')[1:]
                assert len(items) == 4
                label = [int(c) for c in items[-1].split(',')]
                img = load_image(os.path.join(img_root_dir, items[2]))
                # HWC -> CHW layout.
                img = np.transpose(img, (2, 0, 1))
                #img = img[np.newaxis, ...]
                yield img, label
        return reader
    def test_reader(self, img_root_dir, img_label_list):
        '''
        Reader interface for inference.

        :param img_root_dir: The root path of the images for training.
        :type file_list: str

        :param img_label_list: The path of the <image_name, label> file for testing.
        :type file_list: list
        '''
        def reader():
            # Same line format as train_reader, but without sorting.
            for line in open(img_label_list):
                # h, w, img_name, labels
                items = line.split(' ')
                assert len(items) == 4
                label = [int(c) for c in items[-1].split(',')]
                img = load_image(os.path.join(img_root_dir, items[2]))
                img = np.transpose(img, (2, 0, 1))
                #img = img[np.newaxis, ...]
                yield img, label
        return reader
| Python | 0 | |
90c7f90a8d409fd68ebe20ed4ac35fd378abfee5 | Create flush.py | flush.py | flush.py | f = open('out.log', 'w+')
f.write('output is ')
# some work
s = 'OK.'
f.write(s)
f.write('\n')
f.flush()
# some other work
f.write('done\n')
f.flush()
f.close()
| Python | 0.000004 | |
ea11ae8919139eae8eaa6b9b1dfe256726d3c584 | Copy SBSolarcell tests into individual file | test/test_SBSolarcell.py | test/test_SBSolarcell.py | # -*- coding: utf-8 -*-
import numpy as np
import ibei
from astropy import units
import unittest
temp_sun = 5762.
temp_earth = 288.
bandgap = 1.15
input_params = {"temp_sun": temp_sun,
"temp_planet": temp_earth,
"bandgap": bandgap,
"voltage": 0.5,}
class CalculatorsReturnUnits(unittest.TestCase):
    """
    Tests units of the calculator methods returned values.
    """
    def setUp(self):
        """
        Initialize SBSolarcell object from input_params
        """
        # Fresh cell per test so per-test mutations (e.g. bandgap = 0)
        # cannot leak between cases.
        self.solarcell = ibei.SQSolarcell(input_params)
    def test_calc_blackbody_radiant_power_density(self):
        """
        calc_blackbody_radiant_power_density should return value with unit of W m^-2.
        """
        tested_unit = self.solarcell.calc_blackbody_radiant_power_density().unit
        target_unit = units.Unit("W/m2")
        self.assertEqual(tested_unit, target_unit)
    def test_calc_power_density(self):
        """
        calc_power_density should return value with unit of W m^-2.
        """
        tested_unit = self.solarcell.calc_power_density().unit
        target_unit = units.Unit("W/m2")
        self.assertEqual(tested_unit, target_unit)
    def test_calc_power_density_zero_bandgap(self):
        """
        calc_power_density should return value with unit of W m^-2
        even in the degenerate zero-bandgap case.
        """
        self.solarcell.bandgap = 0
        tested_unit = self.solarcell.calc_power_density().unit
        target_unit = units.Unit("W/m2")
        self.assertEqual(tested_unit, target_unit)
class CalculatorsReturnType(unittest.TestCase):
    """
    Tests type of the calculator methods returned values.
    """
    def setUp(self):
        """
        Initialize SBSolarcell object from input_params
        """
        self.solarcell = ibei.SQSolarcell(input_params)
    def test_calc_efficiency(self):
        """
        calc_efficiency should return a plain float (dimensionless).
        """
        # (The original docstring was copy-pasted from the power-density
        # tests; this case checks the return type of calc_efficiency.)
        self.assertIsInstance(self.solarcell.calc_efficiency(), float)
class CalculatorsReturnValue(unittest.TestCase):
    """
    Tests special values of the calculator methods.
    """
    def setUp(self):
        """
        Initialize SBSolarcell object from input_params
        """
        self.solarcell = ibei.SQSolarcell(input_params)
    def test_calc_power_density(self):
        """
        calc_power_density should return 0 when bandgap = 0.
        """
        self.solarcell.bandgap = 0
        self.assertEqual(0, self.solarcell.calc_power_density())
if __name__ == "__main__":
pass
| Python | 0 | |
a973b1daca340031c671070e0f102a6114f58fab | add files | mysite/wordclips/ventriloquy/test_ventriloquy.py | mysite/wordclips/ventriloquy/test_ventriloquy.py | from django.test import TestCase
from wordclips.ventriloquy.ventriloquy import Ventriloquy
from wordclips.models import Wordclip
class VentriloquyTestCase(TestCase):
    """Exercises Ventriloquy word lookup and audio creation against a
    small fixture of Wordclip rows ("how", "are", "you", "people")."""
    def setUp(self):
        self.ventriloquy = Ventriloquy()
        # Put dummy objects in the database for testing purposes.
        Wordclip.objects.create(name="how")
        Wordclip.objects.create(name="are")
        Wordclip.objects.create(name="you")
        Wordclip.objects.create(name="people")
    def test_found_in_db(self):
        """
        All words present: err is 0 and the matching Wordclip objects
        are returned in input order.
        """
        err, lst = self.ventriloquy.check_words(["how", "are", "you"])
        o1 = Wordclip.objects.get(name="how")
        o2 = Wordclip.objects.get(name="are")
        o3 = Wordclip.objects.get(name="you")
        self.assertEqual(err, 0)
        self.assertEqual(lst, [o1, o2, o3])
    def test_not_found_in_db(self):
        """
        Test objects not being found in the database,
        the first word that can not be found will be returned
        """
        err, lst = self.ventriloquy.check_words(["how", "shooot"])
        self.assertEqual(err, -1)
        self.assertEqual(lst, "shooot")
    def test_creating_audio_success(self):
        """
        Test audio being successfully created
        """
        err, lst = self.ventriloquy.create_audio(["how", "are", "you", "people"])
        self.assertEqual(err, 0)
        self.assertEqual(lst, [])
    def test_creating_audio_failed(self):
        """
        Test audio created failed: err is -1 and the first missing
        word is returned.
        """
        err, lst = self.ventriloquy.create_audio(["how", "are", "you", "people", "damn", "it"])
        self.assertEqual(err, -1)
        self.assertEqual(lst, "damn")
| Python | 0.000002 | |
8fd466ecd16db736177104902eb84f661b2b62cc | Create sitemap for google news | opps/sitemaps/googlenews.py | opps/sitemaps/googlenews.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.sitemaps import GenericSitemap
from django.contrib.sites.models import Site
class GoogleNewsSitemap(GenericSitemap):
    """Sitemap variant rendered with the Google News template.

    Re-associates each URL dict produced by the parent class with the
    object it was generated from, so the template can reach the item's
    attributes via ``url.item``.
    """
    # That's Google News limit. Do not increase it!
    limit = 1000
    sitemap_template = 'sitemap_googlenews.xml'

    def get_urls(self, page=1, site=None):
        """Return the parent's urls for *page*, each annotated with
        an 'item' key pointing at its source object."""
        if site is None:
            site = Site.objects.get_current()
        sup = super(GoogleNewsSitemap, self)
        old_urls = sup.get_urls(page, site)
        urls = []
        for item in self.paginator.page(page).object_list:
            # The expected absolute location for this item does not
            # depend on `url`, so compute it once per item.
            loc = "http://%s%s" % (site.domain, self.location(item))
            # Iterate over a snapshot: the original removed from
            # `old_urls` while iterating it directly, which skips the
            # element following each match.
            for url in list(old_urls):
                if url.get('location') == loc:
                    old_urls.remove(url)
                    url['item'] = item
                    urls.append(url)
        return urls
| Python | 0.000095 | |
2a106a12db2a59ccb0517a13db67b35f475b3ef5 | Add args to survey_data url | apps/survey/urls.py | apps/survey/urls.py | from django.conf.urls.defaults import *
from . import views
urlpatterns = patterns('',
url(r'^profile/$', views.profile_index, name='survey_profile'),
url(r'^profile/electric/$', views.profile_electric, name='survey_profile_electric'),
url(r'^main/$', views.main_index),
url(r'^group_management/$', views.group_management, name='group_management'),
url(r'^survey_management/$', views.survey_management, name='survey_management'),
url(r'^survey_data/(?P<survey_shortname>.+)/(?P<id>\d+)/$', views.survey_data, name='survey_data'),
#url(r'^survey_data/(?P<survey_shortname>.+)/$', views.survey_data, name='survey_data'),
url(r'^thanks_profile/$', views.thanks_profile, name='profile_thanks'),
url(r'^$', views.index, name='survey_index'),
)
| from django.conf.urls.defaults import *
from . import views
urlpatterns = patterns('',
url(r'^profile/$', views.profile_index, name='survey_profile'),
url(r'^profile/electric/$', views.profile_electric, name='survey_profile_electric'),
url(r'^main/$', views.main_index),
url(r'^group_management/$', views.group_management, name='group_management'),
url(r'^survey_management/$', views.survey_management, name='survey_management'),
url(r'^survey_data/$', views.survey_data, name='survey_management'),
url(r'^thanks_profile/$', views.thanks_profile, name='profile_thanks'),
url(r'^$', views.index, name='survey_index'),
)
| Python | 0.000002 |
b85b8915b73433f74d8ee5c6f6ef9f88d8b82bd8 | add original py script | genSongbook.py | genSongbook.py | #!/usr/bin/python
import os
f = open('songbook.tex', 'w')
s = """% songbook.tex
%\documentclass[11pt,a4paper]{article} % article format
\documentclass[11pt,a4paper,openany]{book} % book format
\usepackage[margin=0.7in]{geometry}
%\usepackage[utf8]{inputenc} % tildes
\usepackage{graphics}
\usepackage[dvips]{graphicx}
\usepackage{hyperref}
\usepackage{verbatim}
% next if more than 100 chapters.
\usepackage{titletoc}
\\titlecontents{chapter}[2.5em]
{\\addvspace{0.5pc}\\bfseries}
{\contentslabel{2em}}
{}
{\\titlerule*[0.3pc]{.}\contentspage}
\hypersetup{
pdftitle={Songbook (English) - Summer 2014},
pdfsubject={Songbook (English) - Summer 2014},
pdfauthor={jgvictores},
pdfkeywords={songbook} {english} {summer} {2014},
colorlinks,
citecolor=black,
filecolor=black,
linkcolor=black,
urlcolor=black,
bookmarks
}
\makeatletter
\\renewcommand{\@makechapterhead}[1]{%
{\setlength{\parindent}{0pt} \\raggedright \\normalfont
\\bfseries S-\\thechapter.\ #1
\par\\nobreak\\vspace{10 pt}}}
\makeatother
\\begin{document}
\Large
\\title{Songbook (English)}
\\author{by -j}
\date{Summer 2014}
\maketitle
\cleardoublepage
\\tableofcontents
\\newpage % book format
%-- To force blank page:
%\\newpage
%\\thispagestyle{empty}
%\\mbox{}
"""
for dirname, dirnames, filenames in os.walk('/opt/Dropbox/lyrics/english'):
for filename in sorted(filenames):
s += "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"
name, extension = os.path.splitext(filename)
s += "\chapter{" + name + "}\n"
s += "\\begin{verbatim}\n"
song = open( os.path.join(dirname, filename) )
s += song.read()
s += "\\end{verbatim}\n"
s += "\n"
s += """
\end{document}
"""
f.write(s)
| Python | 0 | |
ac5b1181ff73b9d12c09731a646dac7fa23c342b | Add Weatherbit module | desertbot/modules/commands/weather/Weatherbit.py | desertbot/modules/commands/weather/Weatherbit.py | from collections import OrderedDict
from datetime import datetime
from twisted.plugin import IPlugin
from zope.interface import implementer
from desertbot.moduleinterface import IModule
from desertbot.modules.commands.weather.BaseWeatherCommand import BaseWeatherCommand, getFormattedWeatherData, \
getFormattedForecastData
@implementer(IPlugin, IModule)
class Weatherbit(BaseWeatherCommand):
    """Weather/forecast bot command backed by the Weatherbit.io v2.0 API."""
    weatherBaseURL = "https://api.weatherbit.io/v2.0"
    def __init__(self):
        # Sub-command name -> handler, dispatched by the base class.
        subCommands = OrderedDict([
            ('weather', self.getWeather),
            ('forecast', self.getForecast)]
        )
        BaseWeatherCommand.__init__(self, "Weatherbit", subCommands)
    def triggers(self):
        # Chat command words that activate this module.
        return ["weatherbit"]
    def getWeather(self, location) -> str:
        """Current conditions for *location* as a display string."""
        return self._handleCommand("weather", location)
    def getForecast(self, location) -> str:
        """Daily forecast for *location* as a display string."""
        return self._handleCommand("forecast", location)
    def _handleCommand(self, subCommand, location) -> str:
        """Fetch and format Weatherbit data.

        *location* is a dict with at least 'latitude'/'longitude' keys;
        always returns a user-facing message string.
        """
        request = subCommand
        params = {
            "lat": location["latitude"],
            "lon": location["longitude"],
            "units": "M",
            "key": self.apiKey
        }
        # Map the public sub-command names onto API endpoint paths.
        if subCommand == "weather":
            request = "current"
        if subCommand == "forecast":
            request = "forecast/daily"
            params["days"] = 4
        url = "{}/{}".format(self.weatherBaseURL, request)
        result = self.bot.moduleHandler.runActionUntilValue("fetch-url", url, params)
        output = None
        if not result:
            output = "No weather for this location could be found at this moment. Try again later."
        else:
            j = result.json()
            # A missing "data" key or an explicit count of 0 means the
            # API had nothing for this location.
            if "data" not in j or "count" in j and j["count"] == 0:
                output = "The Weatherbit API returned an unknown reply."
            else:
                if subCommand == "weather":
                    output = _parseWeather(j)
                elif subCommand == "forecast":
                    output = _parseForecast(j)
        return output
def _parseWeather(json):
    """Shape a Weatherbit current-conditions reply for the shared
    weather formatter and return the formatted string."""
    observation = json["data"][0]
    conditions = observation["weather"]
    fields = {
        "weatherCode": conditions["code"],
        "description": conditions["description"],
        "tempC": observation["temp"],
        "humidity": observation["rh"],
        "windSpeedMs": observation["wind_spd"],
        "timestamp": observation["ts"],
        "windDir": observation["wind_dir"]
    }
    return getFormattedWeatherData(fields)
def _parseForecast(json):
    """Shape a Weatherbit daily-forecast reply for the shared forecast
    formatter and return the formatted string."""
    days = [
        {
            "date": datetime.fromtimestamp(day['ts']).strftime("%A"),
            "minC": day["low_temp"],
            "maxC": day["max_temp"],
            "weatherCode": day["weather"]["code"],
            "description": day["weather"]["description"],
        }
        for day in json["data"]
    ]
    return getFormattedForecastData(days)
weatherbitCommand = Weatherbit()
| Python | 0 | |
8822eba1c4351f8cc575fdb33c15bcd6a27bf21c | allow for nodePort to be None in case of ClusterIP | kubernetes/models/v1/ServicePort.py | kubernetes/models/v1/ServicePort.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
from kubernetes.utils import is_valid_string
class ServicePort(object):
    """Model of a Kubernetes v1 ServicePort.

    Property setters validate and coerce values; serialize() emits the
    camelCase dict form used by the Kubernetes API. node_port may be
    None, since ClusterIP services have no nodePort.
    """
    VALID_PROTOCOLS = ['TCP', 'UDP']
    def __init__(self, name=None, model=None):
        super(ServicePort, self).__init__()
        self._name = None
        self._protocol = None
        self._port = None
        self._target_port = None
        self._node_port = None
        if name is not None:
            self.name = name
        if model is not None:
            # Hydrate from an API-style dict; every key passes through
            # the validating property setters below.
            if 'name' in model:
                self.name = model['name']
            if 'protocol' in model:
                self.protocol = model['protocol']
            if 'port' in model:
                self.port = model['port']
            if 'targetPort' in model:
                self.target_port = model['targetPort']
            if 'nodePort' in model:
                self.node_port = model['nodePort']
    # ------------------------------------------------------------------------------------- name
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, name=None):
        if not is_valid_string(name):
            raise SyntaxError('ServicePort: name: [ {} ] is invalid.'.format(name))
        self._name = name
    # ------------------------------------------------------------------------------------- protocol
    @property
    def protocol(self):
        return self._protocol
    @protocol.setter
    def protocol(self, protocol=None):
        # Stored upper-cased; only TCP/UDP are accepted.
        if not is_valid_string(protocol) or protocol.upper() not in ServicePort.VALID_PROTOCOLS:
            raise SyntaxError('ServicePort: protocol: [ {} ] is invalid.'.format(protocol))
        self._protocol = protocol.upper()
    # ------------------------------------------------------------------------------------- port
    @property
    def port(self):
        return self._port
    @port.setter
    def port(self, port=None):
        # Numeric strings are coerced to int; anything else is rejected.
        if isinstance(port, str) and port.isdigit():
            port = int(port)
        if not isinstance(port, int):
            raise SyntaxError('ServicePort: port: [ {} ] is invalid.'.format(port))
        self._port = port
    # ------------------------------------------------------------------------------------- targetPort
    @property
    def target_port(self):
        return self._target_port
    @target_port.setter
    def target_port(self, port=None):
        # targetPort may be an int or a named port (non-numeric string).
        msg = 'ServicePort: target_port: [ {} ] is invalid.'.format(port)
        try:
            p = int(port)
        except ValueError:
            # Not numeric: accept as a named port if it is a valid string.
            if not is_valid_string(port):
                raise SyntaxError(msg)
            p = port
        except TypeError:
            raise SyntaxError(msg)
        self._target_port = p
    # ------------------------------------------------------------------------------------- nodePort
    @property
    def node_port(self):
        return self._node_port
    @node_port.setter
    def node_port(self, port=None):
        # None is explicitly allowed: ClusterIP services have no nodePort.
        if port is not None and isinstance(port, str) and port.isdigit():
            port = int(port)
        if port is not None and not isinstance(port, int):
            raise SyntaxError('ServicePort: node_port: [ {} ] is invalid.'.format(port))
        self._node_port = port
    # ------------------------------------------------------------------------------------- serialize
    def serialize(self):
        """Return the camelCase API dict, omitting unset (None) fields."""
        data = {}
        if self.name is not None:
            data['name'] = self.name
        if self.protocol is not None:
            data['protocol'] = self.protocol
        if self.port is not None:
            data['port'] = self.port
        if self.target_port is not None:
            data['targetPort'] = self.target_port
        if self.node_port is not None:
            data['nodePort'] = self.node_port
        return data
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
from kubernetes.utils import is_valid_string
class ServicePort(object):
VALID_PROTOCOLS = ['TCP', 'UDP']
def __init__(self, name=None, model=None):
super(ServicePort, self).__init__()
self._name = None
self._protocol = None
self._port = None
self._target_port = None
self._node_port = None
if name is not None:
self.name = name
if model is not None:
if 'name' in model:
self.name = model['name']
if 'protocol' in model:
self.protocol = model['protocol']
if 'port' in model:
self.port = model['port']
if 'targetPort' in model:
self.target_port = model['targetPort']
if 'nodePort' in model:
self.node_port = model['nodePort']
# ------------------------------------------------------------------------------------- name
@property
def name(self):
return self._name
@name.setter
def name(self, name=None):
if not is_valid_string(name):
raise SyntaxError('ServicePort: name: [ {} ] is invalid.'.format(name))
self._name = name
# ------------------------------------------------------------------------------------- protocol
@property
def protocol(self):
return self._protocol
@protocol.setter
def protocol(self, protocol=None):
if not is_valid_string(protocol) or protocol.upper() not in ServicePort.VALID_PROTOCOLS:
raise SyntaxError('ServicePort: protocol: [ {} ] is invalid.'.format(protocol))
self._protocol = protocol.upper()
# ------------------------------------------------------------------------------------- port
@property
def port(self):
return self._port
@port.setter
def port(self, port=None):
if isinstance(port, str) and port.isdigit():
port = int(port)
if not isinstance(port, int):
raise SyntaxError('ServicePort: port: [ {} ] is invalid.'.format(port))
self._port = port
# ------------------------------------------------------------------------------------- targetPort
@property
def target_port(self):
return self._target_port
@target_port.setter
def target_port(self, port=None):
msg = 'ServicePort: target_port: [ {} ] is invalid.'.format(port)
try:
p = int(port)
except ValueError:
if not is_valid_string(port):
raise SyntaxError(msg)
p = port
except TypeError:
raise SyntaxError(msg)
self._target_port = p
# ------------------------------------------------------------------------------------- nodePort
@property
def node_port(self):
return self._node_port
@node_port.setter
def node_port(self, port=None):
if isinstance(port, str) and port.isdigit():
port = int(port)
if not isinstance(port, int):
raise SyntaxError('ServicePort: node_port: [ {} ] is invalid.'.format(port))
self._node_port = port
# ------------------------------------------------------------------------------------- serialize
def serialize(self):
data = {}
if self.name is not None:
data['name'] = self.name
if self.protocol is not None:
data['protocol'] = self.protocol
if self.port is not None:
data['port'] = self.port
if self.target_port is not None:
data['targetPort'] = self.target_port
if self.node_port is not None:
data['nodePort'] = self.node_port
return data
| Python | 0.000398 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.