commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
8c297e4403e840022b8b0db65dc098858acf27d7 | refactor by removing duplication. | pheanex/xpython,smalley/python,exercism/xpython,jmluy/xpython,outkaj/xpython,orozcoadrian/xpython,smalley/python,rootulp/xpython,mweb/python,exercism/python,N-Parsons/exercism-python,mweb/python,jmluy/xpython,outkaj/xpython,oalbe/xpython,N-Parsons/exercism-python,rootulp/xpython,behrtam/xpython,pheanex/xpython,behrtam/xpython,orozcoadrian/xpython,oalbe/xpython,exercism/xpython,de2Zotjes/xpython,exercism/python,de2Zotjes/xpython | nucleotide-count/example.py | nucleotide-count/example.py | NUCLEOTIDES = 'ATCG'
def count(strand, abbreviation):
_validate(abbreviation)
return strand.count(abbreviation)
def nucleotide_counts(strand):
return {
abbr: strand.count(abbr)
for abbr in NUCLEOTIDES
}
def _validate(abbreviation):
if abbreviation not in NUCLEOTIDES:
raise ValueError('%s is not a nucleotide.' % abbreviation)
| NUCLEOTIDES = 'ATCGU'
def count(strand, abbreviation):
_validate(abbreviation)
return strand.count(abbreviation)
def nucleotide_counts(strand):
return {
abbr: strand.count(abbr)
for abbr in 'ATGC'
}
def _validate(abbreviation):
if abbreviation not in NUCLEOTIDES:
raise ValueError('%s is not a nucleotide.' % abbreviation)
| mit | Python |
6bf04c596670bcb7b00c79f9b8f42c9fcd37cc76 | Create hourly_entries.py | disfear86/Data-Analysis | Udacity_Data_Analysis/hourly_entries.py | Udacity_Data_Analysis/hourly_entries.py | import pandas as pd
# Cumulative entries and exits for one station for a few hours.
entries_and_exits = pd.DataFrame({
'ENTRIESn': [3144312, 3144335, 3144353, 3144424, 3144594,
3144808, 3144895, 3144905, 3144941, 3145094],
'EXITSn': [1088151, 1088159, 1088177, 1088231, 1088275,
1088317, 1088328, 1088331, 1088420, 1088753]
})
def get_hourly_entries_and_exits(data):
'''
takes a DataFrame with cumulative entries
and exits, returns a DataFrame with hourly entries and exits.
'''
return (data - data.shift(periods=1)).drop(0)
print get_hourly_entries_and_exits(entries_and_exits)
| mit | Python | |
6475560c20cc53e8877f246b075a7e432271f694 | Create auth.py | alexiskulash/ia-caucus-sentiment | src/auth.py | src/auth.py | class TwitterAuth:
consumer_key="#"
consumer_secret="#"
access_token="#"
access_token_secret="#"
| mit | Python | |
55689cc63b321803e33da2029f2a1d34467d435d | Add migration | softwaresaved/fat,softwaresaved/fat,softwaresaved/fat,softwaresaved/fat | fellowms/migrations/0052_merge.py | fellowms/migrations/0052_merge.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-04 14:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('fellowms', '0051_event_required_blog_posts'),
('fellowms', '0051_auto_20160804_1425'),
]
operations = [
]
| bsd-3-clause | Python | |
c0b37b879b2e20ee71663e64be76ad11c1e1794c | Add a script that can be used to compare asyncio.sleep to time.sleep | showa-yojyo/bin,showa-yojyo/bin | async/3.7/basic/compsleep.py | async/3.7/basic/compsleep.py | #!/usr/bin/env python
"""https://docs.python.org/3.7/library/asyncio-task.html 変奏版
Features:
- asyncio.gather()
- asyncio.sleep()
- asyncio.run()
"""
import asyncio
import logging
import time
concurrent = 3
delay = 5
# PYTHONASYNCIODEBUG=1
logging.basicConfig(level=logging.DEBUG)
async def async_pause():
await asyncio.sleep(delay)
return 0
async def sync_pause():
time.sleep(delay)
return 0
async def main():
"""Schedule three calls *concurrently*"""
tasks = [
async_pause() for _ in range(concurrent)]
await asyncio.gather(*tasks)
tasks =[
sync_pause() for _ in range(concurrent)]
await asyncio.gather(*tasks)
# PYTHONASYNCIODEBUG=1
asyncio.run(main(), debug=True)
| mit | Python | |
1128ed88d9e92f76629dc7b9881e92ea89024a6b | add one python file | DivadOEC/HStudyDocs | pyStudy/Investment.py | pyStudy/Investment.py | # encoding: utf-8
import datetime,time;
INVS_REPAY_BY_MONTH = 1
INVS_REPAY_BY_DAYS = 2
class RepayMent(object):
def __init__(self,repayDate,repayTerm,repayAmout)
self.repayDate = repayDate
self.repayTerm = repayTerm # when INVS_REPAY_BY_MONTH,set -1
self.repayAmout = repayAmout
class Investment(object):
def __init__(self,invsDate,invsAmout,invsRatio,invsTerm,invsRepayWay):
self.invsDate = invsDate
self.invsAmout = invsAmout
self.invsRatio = invsRatio
self.invsTerm = invsTerm
self.invsRepayWay = invsRepayWay
## 还款计划表,默认为空
self.invsRepayList = [];
## Calculate the investment's end-time & totalRepayAmout
if self.invsRepayWay == INVS_REPAY_BY_MONTH:
if invsDate.month + invsTerm > 12:
self.invsFiniDate = datetime.date(invsDate.year+1, invsDate.Month+invsTerm-12, invsDate.day)
else:
self.invsFiniDate = datetime.date(invsDate.year, invsDate.Month+invsTerm, invsDate.day)
self.totalRepayAmout = invsAmout * invsRatio * invsTerm / 12
else:
self.invsFiniDate = self.invsDate + datetime.timedelta(invsTerm)
self.totalRepayAmout = invsAmout * invsRatio * invsTerm / 365
## 生产还款计划表
def GenRepayList(self):
if self.invsRepayWay == INVS_REPAY_BY_MONTH:
if invsDate.month = 12:
self.f1RepayDate = datetime.date(invsDate.year+1, 1, invsDate.day)
else:
self.f1RepayDate = datetime.date(invsDate.year, invsDate.Month+1, invsDate.day)
for x in range(invsTerm):
if f1RepayDate.month+x <=12:
repayDate = datetime.date(f1RepayDate.year,f1RepayDate.month+x, invsDate.day)
else:
repayDate = datetime.date(f1RepayDate.year+1,f1RepayDate.month+x-12, invsDate.day)
# self.totalRepayAmout = invsAmout * invsRatio * invsTerm / 12
repayAmout = totalRepayAmout / invsTerm
self.invsRepayList.append(RepayMent(repayDate, -1, repayAmout))
else:
if invsDate.month = 12:
self.f1RepayDate = datetime.date(invsDate.year+1, 1, 20)
else:
self.f1RepayDate = datetime.date(invsDate.year, invsDate.Month+1, 20)
## 判断是否一次性派息
payTimes = (invsFiniDate.year - f1RepayDate.year)*12 + invsFiniDate.month - f1RepayDate.month + 1
if payTimes == 1:
#结构化业务数据后放入容器中,便于迭代遍历
self.invsRepayList.append(RepayMent(invsFiniDate, invsTerm, totalRepayAmout))
print '一次性派息,派息日: ', invsFiniDate, ',计息天数:', invsTerm, ',利息:', totalRepayAmout
else:
#print payTimes,'程序猿哥哥看片去了...'
lastPayDate = invsDate
for x in range(payTimes-1):
# 计算派息日&派息金额
# timedelta可以查看:天数(days),秒数 (seconds)
#repayDate = f1RepayDate + datetime.timedelta(months=x)
if f1RepayDate.month+x <=12:
repayDate = datetime.date(f1RepayDate.year, f1RepayDate.month+x, 20)
else:
repayDate = datetime.date(f1RepayDate.year+1, f1RepayDate.month+x-12, 20)
repayDays = (repayDate - lastPayDate).days
repayAmout = invsAmout * invsRatio * repayDays / 365
self.invsRepayList.append(RepayMent(repayDate, repayDays, repayAmout))
#print x, repayDate, repayAmout
lastPayDate = repayDate
# 最后一期派息
repayDays = (invsFiniDate - lastPayDate).days
repayAmout = invsAmout * invsRatio * repayDays / 365
self.invsRepayList.append(RepayMent(invsFiniDate, repayDays, repayAmout))
len = len(invsRepayList)
n = 0
while n<len:
print n, '多次派息,派息日: ', invsRepayList[n].repayDate, ',计息天数:', invsRepayList[n].repayTerm, ',利息:', invsRepayList[n].repayAmout
n = n + 1
print '投资本金:', invsAmout, ';累计总收益:', totalRepayAmout
| mit | Python | |
505ee67af2a3c8a9c8725d7506725c3109a0891d | add drivetimer | tigfox/legendary-sniffle | drivetimer/scratch.py | drivetimer/scratch.py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import requests
stockSymbol = 'NASDAQ:ETSY' #string as MARKET:STOCK see alphavantage API guide for details
alphaVantageAPI = '3R81P8I8EK4HV3ES' #alphavantage API key
#==============================================================================
#r = requests.get('https://maps.googleapis.com/maps/api/directions/json?origin=Brooklyn,NY&destination=Oyster+Bay,NY&key=AIzaSyCriePfKNU2Rkb-bCwcT9ESHLiAA-vVpDc')
#for routes in r.json()['routes']:
# for legs in routes['legs']:
# duration = legs['duration']
# if duration['value'] <= 3600:
# print("Go Home Right Meow. It'll take you " + str(duration['value']/60) + " minutes.")
#
#==============================================================================
#==============================================================================
# r = requests.get('https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol=ETSY&apikey=3R81P8I8EK4HV3ES')
# symbol = r.json()['Realtime Global Securities Quote']['01. Symbol']
# price = r.json()['Realtime Global Securities Quote']['03. Latest Price']
# openPrice = r.json()['Realtime Global Securities Quote']['04. Open (Current Trading Day)']
# direction = price >= openPrice
#==============================================================================
def get_stockprice():
stockURL = 'https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol=' + stockSymbol + '&apikey=' + alphaVantageAPI
r = requests.get(stockURL)
symbol = r.json()['Realtime Global Securities Quote']['01. Symbol']
price = r.json()['Realtime Global Securities Quote']['03. Latest Price']
openPrice = r.json()['Realtime Global Securities Quote']['04. Open (Current Trading Day)']
direction = price >= openPrice
print(price)#, direction)
get_stockprice()
| mit | Python | |
e6dff5f5eac3e5ce93ed925374a27abe53eeb1a7 | Add dictlike: dictionary like object for JSON wrappers | bwesterb/sarah | src/dictlike.py | src/dictlike.py | class DictLike(object):
""" Base class for a dictionary based object.
Think about wrappers around JSON data. """
def __init__(self, data):
object.__setattr__(self, '_data', data)
def __getattr__(self, name):
try:
return self._data[name]
except KeyError:
raise AttributeError
def __setattr__(self, name, value):
self._data[name] = value
def __delattr__(self, name):
del self._data[name]
def to_dict(self):
return self._data
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._data))
class AliasingDictLike(DictLike):
""" A DictLike where keys have aliases provided by __class__.aliases
"""
aliases = {'a':'aaa'}
def __init__(self, data):
_data = {}
aliases = type(self).aliases
for k, v in data.iteritems():
if k in aliases:
k = aliases[k]
_data[k] = v
super(AliasingDictLike, self).__init__(_data)
def __getattr__(self, name):
if name in type(self).aliases:
name = type(self).aliases[name]
try:
return self._data[name]
except KeyError:
raise AttributeError
def __setattr__(self, name, value):
if name in type(self).aliases:
name = type(self).aliases[name]
self._data[name] = value
def __delattr__(self, name):
if name in type(self).aliases:
name = type(self).aliases[name]
del self._data[name]
| agpl-3.0 | Python | |
cad2f9363e30f13c5f10f56c8cf5d0824c1223ac | Add tests for source to sink simulation, which was originally developed in cycamore repository | gidden/cyclus,rwcarlsen/cyclus,hodger/cyclus,gidden/cyclus,hodger/cyclus,gidden/cyclus,rwcarlsen/cyclus,Baaaaam/cyclus,hodger/cyclus,rwcarlsen/cyclus,mbmcgarry/cyclus,rwcarlsen/cyclus,mbmcgarry/cyclus,mbmcgarry/cyclus,Baaaaam/cyclus,gidden/cyclus,mbmcgarry/cyclus,hodger/cyclus,hodger/cyclus,Baaaaam/cyclus | integration_tests/test_source_to_sink.py | integration_tests/test_source_to_sink.py | #! /usr/bin/python
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_array_equal
import os
import tables
import numpy as np
from tools import check_cmd
""" Tests """
def test_source_to_sink():
""" Tests linear growth of sink inventory """
#Cyclus simulation input for source_to_sink
sim_inputs = ["./inputs/source_to_sink/source_to_sink.xml",
"./inputs/source_to_sink/source_to_sink_limited_lifetime.xml"]
for sim_input in sim_inputs:
holdsrtn = [1] # needed because nose does not send() to test generator
cmd = ["cyclus", "-o", "./output_temp.h5", "--input-file", sim_input]
check_cmd(cmd, '.', holdsrtn)
rtn = holdsrtn[0]
if rtn != 0:
return # don't execute further commands
output = tables.open_file("./output_temp.h5", mode = "r")
# tables of interest
paths = ["/Agents", "/Resources", "/Transactions","/TransactedResources"]
# Check if these tables exist
for path in paths:
yield assert_true, output.__contains__(path)
# Get specific tables and columns
agents = output.get_node("/Agents")[:]
resources = output.get_node("/Resources")[:]
transactions = output.get_node("/Transactions")[:]
transacted_res = output.get_node("/TransactedResources")[:]
# Find agent ids of source and sink facilities
agent_ids = agents["ID"]
agent_prototypes = agents["Prototype"]
source_index = []
sink_index = []
i = 0
for prototype in agent_prototypes:
if prototype == "Source":
source_index.append(i)
elif prototype == "Sink":
sink_index.append(i)
i += 1
# Test for only one source and one sink
yield assert_equal, len(source_index), 1
yield assert_equal, len(sink_index), 1
# Get ids of the source and sink
source_id = agent_ids[source_index[0]]
sink_id = agent_ids[sink_index[0]]
# Check if transactions are only between source and sink
sender_ids = transactions["SenderID"]
receiver_ids = transactions["ReceiverID"]
expected_sender_array = np.empty(sender_ids.size)
expected_sender_array.fill(source_id)
expected_receiver_array = np.empty(receiver_ids.size)
expected_receiver_array.fill(sink_id)
yield assert_array_equal, sender_ids, expected_sender_array
yield assert_array_equal, receiver_ids, expected_receiver_array
# Transaction ids must be equal range from 1 to the number of rows
# from both Transactions and TransactedResources tables
expected_trans_ids = np.arange(1, sender_ids.size + 1, 1)
yield assert_array_equal, transactions["ID"], expected_trans_ids
yield assert_array_equal, transacted_res["TransactionID"], expected_trans_ids
# Track transacted resources
trans_resource_ids = transacted_res["ResourceID"]
quantities = resources["Quantity"]
transacted_quantities = np.zeros(trans_resource_ids.size)
expected_quantities = np.empty(trans_resource_ids.size)
# Expect that every transaction quantity is the same amount
expected_quantities.fill(quantities[trans_resource_ids[0] - 1])
j = 0
for id in trans_resource_ids:
transacted_quantities[j] = quantities[id-1]
j += 1
yield assert_array_equal, transacted_quantities, expected_quantities
output.close()
os.remove("./output_temp.h5")
| bsd-3-clause | Python | |
eaf5d72ed584e67619100bb7cdb7deebd9506614 | Add `appengine_config.py` | LuizGsa21/p4-conference-central,LuizGsa21/p4-conference-central,LuizGsa21/p4-conference-central | appengine_config.py | appengine_config.py |
def webapp_add_wsgi_middleware(app):
"""" Wrap WSGI application with the appstats middleware. """
from google.appengine.ext.appstats import recording
return recording.appstats_wsgi_middleware(app)
| apache-2.0 | Python | |
ee1df360979ec24fd8233210372eedd3071cee87 | Add test file to check consistent use of default arguments | mgeier/PySoundFile | tests/test_argspec.py | tests/test_argspec.py | """Make sure that arguments of open/read/write don't diverge"""
import pysoundfile as sf
from inspect import getargspec
open = getargspec(sf.open)
init = getargspec(sf.SoundFile.__init__)
read_function = getargspec(sf.read)
read_method = getargspec(sf.SoundFile.read)
write_function = getargspec(sf.write)
def defaults(spec):
return dict(zip(reversed(spec.args), reversed(spec.defaults)))
def test_if_open_is_identical_to_init():
assert ['self'] + open.args == init.args
assert open.varargs == init.varargs
assert open.keywords == init.keywords
assert open.defaults == init.defaults
def test_read_function():
func_defaults = defaults(read_function)
meth_defaults = defaults(read_method)
open_defaults = defaults(open)
# Not meaningful in read() function:
del open_defaults['mode']
# Only in read() function:
del func_defaults['start']
del func_defaults['stop']
# Same default values as open() and SoundFile.read():
for spec in open_defaults, meth_defaults:
for arg, default in spec.items():
assert (arg, func_defaults[arg]) == (arg, default)
del func_defaults[arg]
assert not func_defaults # No more arguments should be left
def test_write_function():
write_defaults = defaults(write_function)
open_defaults = defaults(open)
# Same default values as open():
for arg, default in write_defaults.items():
assert (arg, open_defaults[arg]) == (arg, default)
del open_defaults[arg]
del open_defaults['mode'] # mode is always 'w'
del open_defaults['channels'] # Inferred from data
del open_defaults['sample_rate'] # Obligatory in write()
assert not open_defaults # No more arguments should be left
| bsd-3-clause | Python | |
2b99781e67e1e2e0bb3863c08e81b3cf57a5e296 | Add request bouncer test cases | menecio/django-api-bouncer | tests/test_bouncer.py | tests/test_bouncer.py | from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth import get_user_model
from api_bouncer.models import Api
User = get_user_model()
class BouncerTests(APITestCase):
def setUp(self):
self.superuser = User.objects.create_superuser(
'john',
'john@localhost.local',
'john123john'
)
self.example_api = Api.objects.create(
name='httpbin',
hosts=['httpbin.org'],
upstream_url='https://httpbin.org'
)
def test_bounce_api_request(self):
"""
Ensure we can bouncer a request to an api and get the same response.
"""
url = '/status/418' # teapot
self.client.credentials(HTTP_HOST='httpbin.org')
response = self.client.get(url)
self.assertEqual(response.status_code, 418)
self.assertIn('teapot', response.content.decode('utf-8'))
def test_bounce_api_request_unknown_host(self):
"""
Ensure we send a response when the hosts making the request is not
trying to call an api.
"""
url = '/test'
self.client.credentials(HTTP_HOST='the-unknown.com')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), {})
| apache-2.0 | Python | |
57378cdd87ec76751b7ad6b03fe3e46bad9b29e5 | add process test | ccbrown/needy,bittorrent/needy,bittorrent/needy,vmrob/needy,vmrob/needy,ccbrown/needy | tests/test_process.py | tests/test_process.py | import unittest
import needy.process
class ProcessTest(unittest.TestCase):
def test_list_command_output(self):
self.assertEqual('hello', needy.process.command_output(['printf', 'hello']))
def test_shell_command_output(self):
self.assertEqual('hello', needy.process.command_output('printf `printf hello`'))
| mit | Python | |
fa176de3145c1b98e31cb210c82b7367842b9b6b | add test_regular.py | rainwoodman/mpi4py_test,nickhand/runtests,nickhand/runtests,rainwoodman/runtests,rainwoodman/runtests,rainwoodman/mpi4py_test | tests/test_regular.py | tests/test_regular.py |
def test_regular():
return 0
| bsd-2-clause | Python | |
28345a0ecb92ef8e7d45fa27d082e21cc1bdd8cd | Add python solution | PlattsSEC/HackerRank,PlattsSEC/HackerRank,PlattsSEC/HackerRank,PlattsSEC/HackerRank,PlattsSEC/HackerRank,PlattsSEC/HackerRank | weighted_uniform_strings/prithaj.py | weighted_uniform_strings/prithaj.py | #!/bin/python
import sys
import string
weights = {string.lowercase[i]:i+1 for i in xrange(len(string.lowercase))}
def return_weights(a):
my_weights, prev, count = set(), '', 1
for i in a:
if i != prev:
prev = i
count = 1
else:
count +=1
my_weights.add(weights[i]*count)
return my_weights
s = raw_input().strip()
n = input()
check = return_weights(s)
for _ in xrange(n):
x = int(raw_input().strip())
if x in check:
print "Yes"
else:
print "No" | mit | Python | |
ab1bc996d477c84187df381ec77e7aaab299783b | Add test for calc_md5() function | bpipat/mws,jameshiew/mws,Bobspadger/python-amazon-mws,GriceTurrble/python-amazon-mws | tests/test_utils.py | tests/test_utils.py | from mws.mws import calc_md5
def test_calc_md5():
assert calc_md5(b'mws') == b'mA5nPbh1CSx9M3dbkr3Cyg=='
| unlicense | Python | |
048a9adbe15201ad0011776587e167725b12f624 | Add script copy_dotfiles_to_repo.py | akselsjogren/dotfiles,akselsjogren/dotfiles,akselsjogren/dotfiles | bin/copy_dotfiles_to_repo.py | bin/copy_dotfiles_to_repo.py | #!/usr/bin/env python2
# coding: utf-8
"""
Copy the dotfiles that dotbot control from home directory to repository.
This is used on Windows/msys2, where symlinks aren't supported and I want to
check in possibly local edits to the repository.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import json
import logging
import os
import shutil
import sys
DOTFILES_DIR = os.path.normpath(os.path.dirname(os.path.dirname(__file__)))
DEFAULT_DOTBOT_CONFIG = os.path.join(DOTFILES_DIR, 'dotbot.conf.json')
def main():
def _loglevel(level):
try:
return getattr(logging, level.upper())
except AttributeError:
raise argparse.ArgumentTypeError('%r is not a valid log level' % level.upper())
parser = argparse.ArgumentParser()
parser.add_argument('--loglevel', type=_loglevel, default=logging.INFO,
metavar='LEVEL', help='Set log level')
parser.add_argument('-c', '--config', default=DEFAULT_DOTBOT_CONFIG, type=argparse.FileType('r'),
metavar='FILE', help='dotbot config file (default: %(default)s)')
args = parser.parse_args()
logging.basicConfig(stream=sys.stdout, level=args.loglevel,
format='%(levelname)s: %(message)s')
logging.debug('Read config file: %s', args.config)
config = json.load(args.config)
logging.debug(json.dumps(config, indent=4))
for section in config:
try:
links = section['link']
break
except KeyError:
continue
copy_files(links)
def copy_files(links):
"""Copy files from target location back to source path.
Assume that links which has a string (not dict) as value are the files that
should be copied. Links with dict are probably more complex and shouldn't
be copied.
"""
for target, source in links.items():
if isinstance(source, basestring):
from_ = os.path.expanduser(target)
to = os.path.join(DOTFILES_DIR, source)
logging.info('Copy %s => %s', from_, to)
shutil.copyfile(from_, to)
if __name__ == '__main__':
main()
| unlicense | Python | |
4ed3a5600222756fce826dbe9fb409730b0174e7 | Add init.py | vivangkumar/uberpy | uber-py/__init__.py | uber-py/__init__.py | __author__ = 'Vivan'
__version__ = '1.0.0'
'''
Specify modules to be imported.
'''
import json
try:
import httplib2
except ImportError:
print "Please ensure that the httplib2 package is installed."
| mit | Python | |
f4462b621b42ae73c9f7853b7e2dac5b730f07d3 | Implement SimProcedure rewind() | f-prettyland/angr,chubbymaggie/simuvex,tyb0807/angr,axt/angr,iamahuman/angr,axt/angr,chubbymaggie/angr,angr/simuvex,schieb/angr,angr/angr,chubbymaggie/angr,tyb0807/angr,chubbymaggie/simuvex,axt/angr,tyb0807/angr,angr/angr,f-prettyland/angr,iamahuman/angr,schieb/angr,schieb/angr,chubbymaggie/simuvex,chubbymaggie/angr,angr/angr,f-prettyland/angr,iamahuman/angr | simuvex/procedures/libc___so___6/rewind.py | simuvex/procedures/libc___so___6/rewind.py | import simuvex
from . import _IO_FILE
######################################
# rewind
######################################
class rewind(simuvex.SimProcedure):
#pylint:disable=arguments-differ
def run(self, file_ptr):
fseek = simuvex.SimProcedures['libc.so.6']['fseek']
self.inline_call(fseek, file_ptr, 0, 0)
return None
| bsd-2-clause | Python | |
9adcdf5f6ba9a57bf651c5a1845ea64711b5e4a2 | add tests for create_image function | kaiCu/mapproxy,vrsource/mapproxy,camptocamp/mapproxy,geoadmin/mapproxy,procrastinatio/mapproxy,Anderson0026/mapproxy,geoadmin/mapproxy,camptocamp/mapproxy,drnextgis/mapproxy,mapproxy/mapproxy,faegi/mapproxy,olt/mapproxy,procrastinatio/mapproxy,drnextgis/mapproxy,olt/mapproxy,vrsource/mapproxy,faegi/mapproxy,mapproxy/mapproxy,Anderson0026/mapproxy,kaiCu/mapproxy | mapproxy/test/unit/test_image_options.py | mapproxy/test/unit/test_image_options.py | # This file is part of the MapProxy project.
# Copyright (C) 2011 Omniscale <http://omniscale.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mapproxy.image.opts import ImageOptions, create_image
from nose.tools import eq_
class TestCreateImage(object):
def test_default(self):
img = create_image((100, 100))
eq_(img.size, (100, 100))
eq_(img.mode, 'RGB')
eq_(img.getcolors(), [(100*100, (255, 255, 255))])
def test_transparent(self):
img = create_image((100, 100), ImageOptions(transparent=True))
eq_(img.size, (100, 100))
eq_(img.mode, 'RGBA')
eq_(img.getcolors(), [(100*100, (255, 255, 255, 0))])
def test_transparent_rgb(self):
img = create_image((100, 100), ImageOptions(mode='RGB', transparent=True))
eq_(img.size, (100, 100))
eq_(img.mode, 'RGB')
eq_(img.getcolors(), [(100*100, (255, 255, 255))])
def test_bgcolor(self):
img = create_image((100, 100), ImageOptions(bgcolor=(200, 100, 0)))
eq_(img.size, (100, 100))
eq_(img.mode, 'RGB')
eq_(img.getcolors(), [(100*100, (200, 100, 0))])
def test_rgba_bgcolor(self):
img = create_image((100, 100), ImageOptions(bgcolor=(200, 100, 0, 30)))
eq_(img.size, (100, 100))
eq_(img.mode, 'RGB')
eq_(img.getcolors(), [(100*100, (200, 100, 0))])
def test_rgba_bgcolor_transparent(self):
img = create_image((100, 100), ImageOptions(bgcolor=(200, 100, 0, 30), transparent=True))
eq_(img.size, (100, 100))
eq_(img.mode, 'RGBA')
eq_(img.getcolors(), [(100*100, (200, 100, 0, 30))])
def test_rgba_bgcolor_rgba_mode(self):
img = create_image((100, 100), ImageOptions(bgcolor=(200, 100, 0, 30), mode='RGBA'))
eq_(img.size, (100, 100))
eq_(img.mode, 'RGBA')
eq_(img.getcolors(), [(100*100, (200, 100, 0, 30))])
| apache-2.0 | Python | |
f43a7dbce4e78bd658b01652c63d852aea5fed0e | add copyFilesFromList.py script | CarnegieHall/quality-control,CarnegieHall/quality-control | copyFilesFromList.py | copyFilesFromList.py | # !/usr/local/bin/python3.4.2
# ----Copyright (c) 2016 Carnegie Hall | The MIT License (MIT)----
# ----For the full license terms, please visit https://github.com/CarnegieHall/quality-control/blob/master/LICENSE----
# argument 0 is the script name
# argument 1 is the path to the grandparent directory of all assets
# argument 2 is the path to the metadata spreadsheet
# argument 3 is the path to the target destination directory for subset of assets
import csv
import os
from os import listdir
from os.path import isfile, join, split
import sys
import shutil
from shutil import copyfile
# def copyFile(src, dest):
# try:
# shutil.copy(src, dest)
# # eg. src and dest are the same file
# except shutil.Error as e:
# print('Error: %s' % e)
# # eg. source or destination doesn't exist
# except IOError as e:
# print('Error: %s' % e.strerror)
#Set filepath variables
filePath_1 = str(sys.argv[1])
filePath_2 = str(sys.argv[2])
filePath_3 = str(sys.argv[3])
# for files in os.walk(filePath_1):
# print(files[2])
with open(filePath_2, 'rU') as f:
fileData = csv.reader(f, dialect='excel', delimiter=',')
next(fileData, None) # skip the headers
for row in fileData:
#verify row #
uploadFilename = row[17]
# print(uploadFilename)
for rootFolder, subdirFolder, files in os.walk(filePath_1):
# matchedFile = files[2]
var1 = rootFolder
var2 = subdirFolder
var3 = files
# print(var1)
# print(var2)
# print(var3)
for item in files:
if uploadFilename in item:
# print(uploadFilename, '\t', files)
inputPath = ''.join([str(var1), '/', str(uploadFilename)])
# print(inputPath)
# shutil.copy(src, dst) does not preserve original modification and access datetime stamps. Benefits are this is faster than shutil.copy2
if inputPath is None:
print('File not found:', '\t', uploadFilename)
else:
shutil.copy2(inputPath, filePath_3)
print('File copied:', '\t', uploadFilename)
| mit | Python | |
20c5dee179bcb4a6b153bad04fb400cf16e5e01b | Test for basefixture | Peter-Slump/django-dynamic-fixtures,Peter-Slump/django-factory-boy-fixtures | tests/fixtures/test_basefixture.py | tests/fixtures/test_basefixture.py | from unittest import TestCase
from dynamic_fixtures.fixtures.basefixture import BaseFixture
class BaseFixtureTestCase(TestCase):
def test_load_not_implemented(self):
"""
Case: load is not implemented
Expected: Error get raised
"""
fixture = BaseFixture('Name', 'Module')
with self.assertRaises(NotImplementedError):
fixture.load()
| mit | Python | |
777ddec03e59ceace746dca4e73d39c71400d10e | Initialize P03_combinePdfs | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter13/P03_combinePdfs.py | books/AutomateTheBoringStuffWithPython/Chapter13/P03_combinePdfs.py | #! python3
# combinePdfs.py - Combines all the PDFs in the current working directory into
# a single PDF.
import PyPDF4, os
# Get all the PDF filenames.
pdfFiles = []
for filename in os.listdir('.'):
if filename.endswith(".pdf"):
pdfFiles.append(filename)
pdfFiles.sort(key = str.lower)
pdfWriter = PyPDF4.PdfFileWriter()
# Loop through all the PDF files.
for filename in pdfFiles:
pdfFileObj = open(filename, "rb")
pdfReader = PyPDF4.PdfFileReader(pdfFileObj)
# Loop through all the pages (except the first) and add them.
for pageNum in range(1, pdfReader.numPages):
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage(pageObj)
# Save the resulting PDF to a file.
pdfOutput = open("allminutes.pdf", "wb")
pdfWriter.write(pdfOutput)
pdfOutput.close()
| mit | Python | |
4c473b54ba64e642efb454a309d2027cd902cc17 | add __init__.py | aterrel/blaze,jcrist/blaze,mrocklin/blaze,dwillmer/blaze,ChinaQuants/blaze,cowlicks/blaze,xlhtc007/blaze,maxalbert/blaze,caseyclements/blaze,cpcloud/blaze,cowlicks/blaze,ChinaQuants/blaze,nkhuyu/blaze,LiaoPan/blaze,scls19fr/blaze,cpcloud/blaze,aterrel/blaze,LiaoPan/blaze,maxalbert/blaze,nkhuyu/blaze,scls19fr/blaze,xlhtc007/blaze,dwillmer/blaze,aterrel/blaze,ContinuumIO/blaze,ContinuumIO/blaze,jdmcbr/blaze,mrocklin/blaze,alexmojaki/blaze,jcrist/blaze,jdmcbr/blaze,caseyclements/blaze,alexmojaki/blaze | blaze/data/__init__.py | blaze/data/__init__.py | from __future__ import absolute_import, division, print_function
from .core import *
from .csv import *
from .sql import *
from .json import *
from .hdf5 import *
from .filesystem import *
from .usability import *
| bsd-3-clause | Python | |
60b629907fe3e880a47825886858c723e41248a5 | add a small script for dumping investigation/incident tags from mozdef | ameihm0912/vmintgr,ameihm0912/vmintgr | misc/mozdef/incinv.py | misc/mozdef/incinv.py | #!/usr/bin/python
import sys
import getopt
from datetime import datetime
from pymongo import MongoClient
import pytz
mclient = None
incident_tagcnt = {}
inves_tagcnt = {}
def tag_summary():
sys.stdout.write('######## tag summary (incidents) ########\n')
for x in incident_tagcnt:
sys.stdout.write('{} {}\n'.format(x, incident_tagcnt[x]))
sys.stdout.write('######## tag summary (investigations) ########\n')
for x in inves_tagcnt:
sys.stdout.write('{} {}\n'.format(x, inves_tagcnt[x]))
def dump_incidents(q):
global incident_tagcnt
mozdefdb = mclient['meteor']
incidents = mozdefdb['incidents']
cursor = incidents.find(q).sort("dateOpened", 1)
cnt = 0
sys.stdout.write('######## incidents ########\n')
for i in cursor:
sys.stdout.write('-------- {} --------\n'.format(cnt))
sys.stdout.write(i['summary'] + '\n')
sys.stdout.write(i['description'] + '\n')
sys.stdout.write('Date opened: {}\n'.format(i['dateOpened']))
for x in i['tags']:
sys.stdout.write(x + '\n')
if x not in incident_tagcnt:
incident_tagcnt[x] = 1
else:
incident_tagcnt[x] += 1
cnt += 1
def dump_investigations(q):
global inves_tagcnt
mozdefdb = mclient['meteor']
incidents = mozdefdb['investigations']
cursor = incidents.find(q).sort("dateOpened", 1)
cnt = 0
sys.stdout.write('######## investigations ########\n')
for i in cursor:
sys.stdout.write('-------- {} --------\n'.format(cnt))
sys.stdout.write(i['summary'] + '\n')
sys.stdout.write(i['description'] + '\n')
sys.stdout.write('Date opened: {}\n'.format(i['dateOpened']))
for x in i['tags']:
sys.stdout.write(x + '\n')
if x not in inves_tagcnt:
inves_tagcnt[x] = 1
else:
inves_tagcnt[x] += 1
cnt += 1
def usage():
    """Print the command synopsis on stdout, then abort with exit status 1."""
    sys.stdout.write('usage: incinv.py mozdef_host start_date end_date\n')
    raise SystemExit(1)
def domain():
    """Parse CLI arguments, connect to MozDef's Mongo and dump both reports.

    argv: incinv.py mozdef_host start_date end_date   (dates as YYYY-MM-DD)
    """
    global mclient
    if len(sys.argv) != 4:
        usage()
    mozdef_host = sys.argv[1]
    utc = pytz.utc
    # Both bounds are interpreted as midnight UTC on the given day.
    start_date = utc.localize(datetime.strptime(sys.argv[2], '%Y-%m-%d'))
    end_date = utc.localize(datetime.strptime(sys.argv[3], '%Y-%m-%d'))
    q = {}
    q['dateOpened'] = {"$gte": start_date, "$lte": end_date}
    # NOTE(review): port 3002 is hard-coded; confirm against the deployment.
    mclient = MongoClient(mozdef_host, 3002)
    dump_incidents(q)
    dump_investigations(q)
    tag_summary()
if __name__ == '__main__':
    domain()
    sys.exit(0)
| mpl-2.0 | Python | |
15ccb7ab2b5d51d69c77fc84f4efc634a82a4b18 | Create add_P68wp_P70moe.py | Xi-Plus/Xiplus-Wikipedia-Bot,Xi-Plus/Xiplus-Wikipedia-Bot | my-ACG/import-claims/add_P68wp_P70moe.py | my-ACG/import-claims/add_P68wp_P70moe.py | # -*- coding: utf-8 -*-
import argparse
import json
import os
os.environ['PYWIKIBOT_DIR'] = os.path.dirname(os.path.realpath(__file__))
import pywikibot
from pywikibot.data.api import Request
import requests
from config import API, PASSWORD, USER # pylint: disable=E0611
# Log in via Pywikibot and locate the wiki's Wikibase repository.
site = pywikibot.Site()
site.login()
datasite = site.data_repository()
# External sites whose page titles are matched against item labels.
zhsite = pywikibot.Site('zh', 'wikipedia')
moesite = pywikibot.Site('moegirl', 'moegirl')
# Separate raw-API session (credentials from config) used for the actual
# wbeditentity writes in addWpAndMoe().
session = requests.Session()
print('fetching login token')
res = session.get(API, params={
    'action': 'query',
    'meta': 'tokens',
    'type': 'login',
    'format': 'json',
}).json()
logintoken = res['query']['tokens']['logintoken']
print('logging in')
res = session.post(API, data={
    'action': 'login',
    'lgname': USER,
    'lgpassword': PASSWORD,
    'lgtoken': logintoken,
    'format': 'json',
}).json()
if res['login']['result'] == 'Success':
    print('login success')
else:
    exit('login fail\n')
# A CSRF token is required for every subsequent editing request.
res = session.get(API, params={
    'action': 'query',
    'meta': 'tokens',
    'type': 'csrf',
    'format': 'json',
}).json()
csrftoken = res['query']['tokens']['csrftoken']
print('csrftoken', csrftoken)
def converttitle(site, title):
    """Resolve redirects and title variants for *title* on *site*.

    Returns the canonical title with spaces replaced by underscores, or
    None when the page does not exist.
    """
    params = {
        'action': 'query',
        'titles': title,
        'redirects': 1,
        'converttitles': 1,
    }
    response = Request(site=site, parameters=params).submit()
    page = next(iter(response['query']['pages'].values()))
    if 'missing' in page:
        return None
    return page['title'].replace(' ', '_')
def addWpAndMoe(title):
    """Fill in missing P68/P70 claims on a Wikibase entity.

    *title* is a Q-item or P-property id; an 'Item:'/'Property:' namespace
    prefix is stripped first.  For each property the entity lacks, the
    entity's zh-tw label is looked up on the matching external site
    (P68 -> zh Wikipedia via zhsite, P70 -> Moegirl via moesite) and, when
    that page exists, a claim pointing at it is queued.  Queued claims are
    written with a single wbeditentity request.
    """
    title = title.replace('Item:', '')
    title = title.replace('Property:', '')
    print(title)
    data = {
        'claims': []
    }
    if title[0] == 'Q':
        myitem = pywikibot.ItemPage(datasite, title)
    elif title[0] == 'P':
        myitem = pywikibot.PropertyPage(datasite, title)
    else:
        print('\t Not Wikibase page')
        return
    myitem.get()
    # Assumes every entity has a zh-tw label; raises KeyError otherwise.
    label = myitem.labels['zh-tw']
    print('\t', label)
    if 'P68' not in myitem.claims:
        targettitle = converttitle(zhsite, label)
        if targettitle:
            new_claim = pywikibot.page.Claim(datasite, 'P68')
            new_claim.setTarget(targettitle)
            print('\t Add P68', targettitle)
            data['claims'].append(new_claim.toJSON())
    if 'P70' not in myitem.claims:
        targettitle = converttitle(moesite, label)
        if targettitle:
            new_claim = pywikibot.page.Claim(datasite, 'P70')
            new_claim.setTarget(targettitle)
            print('\t Add P70', targettitle)
            data['claims'].append(new_claim.toJSON())
    if data['claims']:
        print('\t', data)
        # Write all queued claims in one edit, using the session/token set
        # up at module level.
        session.post(API, data={
            'action': 'wbeditentity',
            'format': 'json',
            'id': title,
            'data': json.dumps(data),
            'summary': '自動新增對應維基頁面',
            'token': csrftoken,
            'bot': 1,
        }).json()
def main():
    """Run addWpAndMoe on every page that links to item Q53."""
    anchor_item = pywikibot.ItemPage(datasite, 'Q53')
    for page in anchor_item.backlinks():
        addWpAndMoe(page.title())
# CLI: with a title argument process just that entity; otherwise sweep
# every page linking to Q53 via main().
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('title', nargs='?')
    args = parser.parse_args()
    if args.title:
        addWpAndMoe(args.title)
    else:
        main()
| mit | Python | |
c0cd91fe44a2ff653cad9f15a5363d9cc97bfa75 | Add a snippet. | jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets | python/astropy/fits/write_binary_table.py | python/astropy/fits/write_binary_table.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Documentation:
# - http://docs.astropy.org/en/stable/io/fits/
# - http://docs.astropy.org/en/stable/io/fits/api/files.html
# - http://www.astropy.org/astropy-tutorials/FITS-tables.html
# - http://www.astropy.org/astropy-tutorials/FITS-images.html
# - http://www.astropy.org/astropy-tutorials/FITS-header.html
import argparse
from astropy.io import fits
import numpy as np
import astropy.table
# PARSE OPTIONS ###############################################################
parser = argparse.ArgumentParser(description="An astropy snippet")
parser.add_argument("filearg", nargs=1, metavar="FILE", help="the output FITS file")
args = parser.parse_args()
# nargs=1 yields a one-element list; unwrap it.
file_path = args.filearg[0]
# WRITE DATA ##################################################################
# Build a small three-column table and save it to FILE as a FITS binary table.
table = astropy.table.Table(names=("column1", "column2", "column3"))
table.add_row([1, 2, 3])
table.add_row([10, 20, 30])
table.add_row([100, 200, 300])
print(table)
# overwrite=True replaces any existing file at file_path.
table.write(file_path, overwrite=True)
| mit | Python | |
e2e5f3ced06bc7c4a603ad5ff73e6df8690aaa0f | Add top_wikipedia_pages.py. | jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools | problem/bench/traffic/top_wikipedia_pages.py | problem/bench/traffic/top_wikipedia_pages.py | #! /usr/bin/env python
# Copyright 2020 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
from collections import namedtuple
from operator import attrgetter
import pprint
import pandas as pd
import requests
# One row of the API response: article title, rank and view count.
Pageviews = namedtuple('pageviews', 'article rank views'.split())
def csv_from_json(date='2020/01/22', out_fspec='/tmp/pv.csv', verbose=False):
    """Fetch the top en.wikipedia pageviews for *date* (YYYY/MM/DD) from the
    Wikimedia REST API and write them to *out_fspec* as CSV.

    Raises requests.HTTPError when the API call fails.
    """
    base = ('https://wikimedia.org/api/rest_v1/metrics'
            '/pageviews/top/en.wikipedia.org/all-access')
    url = f'{base}/{date}'
    print(url)
    req = requests.get(url)
    req.raise_for_status()
    articles = req.json()['items'][0]['articles']
    articles = [Pageviews(**article)
                for article in articles]
    if verbose:
        pprint.pprint(sorted(articles, key=attrgetter('views')))
    df = pd.DataFrame(articles)
    print(df)
    df.to_csv(out_fspec, index=False)
if __name__ == '__main__':
    csv_from_json()
| mit | Python | |
2dee387e43c8d57f7b6f9b291260d8f6dda42bd0 | add measure.py again | lohner/FormalSSA,lohner/FormalSSA,lohner/FormalSSA | measure.py | measure.py | #!/usr/bin/env python3
import sys
import collections
import re
# Aggregated floating-point values per label.
times = collections.defaultdict(lambda: 0.0)
# Aggregated integer counts per label.
phis = collections.defaultdict(lambda: 0)
# Each stdin line is expected to look like "<label> <number>": decimals feed
# `times`, plain integers feed `phis` (a line can only match one of the two).
for line in sys.stdin:
    m = re.match(r'(.*) (\d+\.\d+)', line)
    if m:
        times[m.group(1)] += float(m.group(2))
    m = re.match(r'(.*) (\d+)$', line)
    if m:
        phis[m.group(1)] += int(m.group(2))
# Report both tables sorted by label.
for name, time in sorted(times.items()):
    print("{} {:.3f}".format(name, time))
for name, count in sorted(phis.items()):
    print("{} {:d}".format(name, count))
| bsd-3-clause | Python | |
3d221a09b383f4a71587c165cf85912cabb253e2 | Test script for some failing Dubins 3d paths while planning | fire-rs-laas/fire-rs-saop,fire-rs-laas/fire-rs-saop,fire-rs-laas/fire-rs-saop,fire-rs-laas/fire-rs-saop | python/fire_rs/planning/test_dubins3dpath.py | python/fire_rs/planning/test_dubins3dpath.py | # Copyright (c) 2017, CNRS-LAAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import fire_rs.uav_planning as up
if __name__ == '__main__':
    def traj(uav, orig, dest):
        # Sample the path between two (x, y, z, heading) tuples (third
        # argument 1 is presumably the sampling step — confirm against
        # up.UAV.path_sampling) and return coordinates as three lists.
        origin = up.Waypoint(orig[0], orig[1], orig[2], orig[3])
        target = up.Waypoint(dest[0], dest[1], dest[2], dest[3])
        samples = uav.path_sampling(origin, target, 1)
        x = [wp.x for wp in samples]
        y = [wp.y for wp in samples]
        z = [wp.z for wp in samples]
        return x, y, z
    uav_speed = 18.  # m/s
    uav_max_turn_rate = 32. * np.pi / 180
    uav_max_pitch_angle = 6. / 180. * np.pi
    uav = up.UAV(uav_speed, uav_max_turn_rate, uav_max_pitch_angle)
    # Examples of failing trajectories while planning but not here.
    # Case 1: multi-leg tour; waypoints reproduced verbatim from planner runs.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    wp_ma = [(480160, 6210174, 100, 0), (480160, 6210174, 100, 0),
             (483844.51013562991, 6216373.3243948203, 1073, 1.0377441685192268), (483869.91835593828, 6216416.387399137, 1073, 1.0377441685192268),
             (484047.24726410303, 6216721.4141843673, 1104, 0.49659006833227348), (484091.20787735051, 6216745.2356973942, 1104, 0.49659006833227348),
             (484144.84432897501, 6216823.1286639366, 1027, 1.0441990609703493), (484169.97405482916, 6216866.3547773231, 1027, 1.0441990609703493),
             (480160, 6210174, 100, 0), (480160, 6210174, 100, 0),]
    for i in range(len(wp_ma) - 1):
        ax.plot(*traj(uav, wp_ma[i], wp_ma[i + 1]))
    plt.xlabel("x")
    plt.ylabel("y")
    plt.show(block=False)
    # Case 2: single two-waypoint segment.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    wp_ma = [(485046.97901929758, 6212440.4672314832, 1088, 1.2639035346324197),
             (485195.52938023448, 6212499.8414709819, 1055, 0.4326838402098257)]
    for i in range(len(wp_ma) - 1):
        ax.plot(*traj(uav, wp_ma[i], wp_ma[i + 1]))
    plt.xlabel("x")
    plt.ylabel("y")
    plt.show(block=False)
    # Case 3: single two-waypoint segment.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    wp_ma = [(485309.30963665573, 6213078.6511183679, 983, -2.7678397231859977),
             (485467.56226040138, 6212850.4756678976, 875, -0.99961171783044567)]
    for i in range(len(wp_ma) - 1):
        ax.plot(*traj(uav, wp_ma[i], wp_ma[i + 1]))
    plt.xlabel("x")
    plt.ylabel("y")
    plt.show(block=False)
    # Case 4: last figure blocks so all windows stay open until closed.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    wp_ma = [(485290, 6213075, 983, 0), (485397.89720828738, 6213320.2460238663, 897, 0.51887048029539962)]
    for i in range(len(wp_ma) - 1):
        ax.plot(*traj(uav, wp_ma[i], wp_ma[i + 1]))
    plt.xlabel("x")
    plt.ylabel("y")
    plt.show(block=True)
| bsd-2-clause | Python | |
ff7f9a09afaf2057d3f926a56c48ad873f6c16f6 | Add missing migration | KlubJagiellonski/pola-backend,KlubJagiellonski/pola-backend,KlubJagiellonski/pola-backend,KlubJagiellonski/pola-backend | report/migrations/0002_auto_20151013_2305.py | report/migrations/0002_auto_20151013_2305.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Allow Report.product to be NULL (adds null=True to the FK)."""
    dependencies = [
        ('report', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='report',
            name='product',
            field=models.ForeignKey(to='product.Product', null=True),
        ),
    ]
| bsd-3-clause | Python | |
6449632df519113c143de282df38cda7798807fa | Add autoencoder that contains a Layer. | mdenil/parameter_prediction,mdenil/parameter_prediction,mdenil/parameter_prediction | parameter_prediction/models/autoencoder.py | parameter_prediction/models/autoencoder.py | import numpy as np
import theano.tensor as T
from pylearn2.base import Block
from pylearn2.models import Model
from pylearn2.space import VectorSpace
def _identity(x):
    """Linear decoder activation: return the input unchanged."""
    return x
def _rectified(x):
    """Rectified (ReLU) decoder activation; x * (x > 0) applies elementwise
    to symbolic tensors as well as plain numbers."""
    return x * (x > 0)
# Don't put lambda functions in here or pickle will yell at you when you try to
# save an Autoencoder.
DECODER_FUNCTION_MAP = {
    'linear': _identity,
    'rectified': _rectified,
    'sigmoid': T.nnet.sigmoid,
}
class Autoencoder(Block, Model):
    """
    A basic Autoencoder that uses a Layer for the upward pass.
    The reconstruction function is:
        x_recon = act_dec(layer.transformer.lmul_T(layer.fprop(x)))
    act_dec is specified as a string and can be any of the following:
        'linear': act_dec(x) = x
        'rectified': act_dec(x) = x * (x > 0)
        'sigmoid' : act_dec(x) = T.nnet.sigmoid(x)
    """
    def __init__(self, nvis, layer, act_dec='linear', seed=None):
        # nvis: number of visible (input) units; layer: the encoder layer.
        super(Autoencoder, self).__init__()
        self.input_space = VectorSpace(nvis)
        self.output_space = VectorSpace(layer.dim)
        # Resolve the decoder activation by name; raises KeyError on unknown.
        self.act_dec = DECODER_FUNCTION_MAP[act_dec]
        # self is not really an mlp, but the only thing layer.mlp is used for
        # internally is getting access to rng, which we have
        self.rng = np.random.RandomState(seed)
        layer.mlp = self
        layer.set_input_space(self.input_space)
        self.layer = layer
    def upward_pass(self, inputs):
        # Block interface: alias for encode().
        return self.encode(inputs)
    def encode(self, inputs):
        # Upward pass through the wrapped layer.
        return self.layer.fprop(inputs)
    def decode(self, hiddens):
        # Downward pass: transpose of the layer's transformer, then act_dec.
        return self.act_dec(self.layer.transformer.lmul_T(hiddens))
    def reconstruct(self, inputs):
        return self.decode(self.encode(inputs))
    def get_weights(self, borrow=False):
        # The transformer is expected to expose exactly one weight parameter.
        W, = self.layer.transformer.get_params()
        return W.get_value(borrow=borrow)
    def get_weights_format(self):
        # 'v' (visible) by 'h' (hidden) ordering, per pylearn2 convention.
        return ['v', 'h']
    def get_params(self):
        return self.layer.get_params()
    def __call__(self, inputs):
        # Calling the model is the same as encoding.
        return self.encode(inputs)
    # Use version defined in Model, rather than Block (which raises
    # NotImplementedError).
    get_input_space = Model.get_input_space
    get_output_space = Model.get_output_space
| mit | Python | |
b154a5cd852ecf7f1e27f012a4a42c5ede307561 | Create zadanie2.py | krzyszti/my_projects,krzyszti/my_projects,krzyszti/my_projects,krzyszti/my_projects | exercises/zadanie2.py | exercises/zadanie2.py | '''
PODANA JEST LISTA ZAWIERAJĄCA ELEMENTY O WARTOŚCIACH 1-n. NAPISZ FUNKCJĘ KTÓRA SPRAWDZI JAKICH ELEMENTÓW BRAKUJE
1-n = [1,2,3,4,5,...,10]
np. n=10
wejście: [2,3,7,4,9], 10
wyjście: [1,5,6,8,10]
'''
def n(lst, max):
    """Return, in ascending order, the values from 1..max missing from lst.

    NOTE: the parameter name ``max`` shadows the builtin; it is kept
    unchanged for backward compatibility with existing callers.
    """
    # Build a set once so each membership test is O(1) instead of the
    # original O(len(lst)) list scan per candidate value.
    present = set(lst)
    return [i for i in range(1, max + 1) if i not in present]
| mit | Python | |
c62711b7cc9e921a3c2eb0e04c81e92ed2c82596 | Add python code to verify doublesha256 calculating. | HashRatio/w5500_test,HashRatio/w5500_test,HashRatio/w5500_test,HashRatio/w5500_test,HashRatio/w5500_test,HashRatio/w5500_test | firmware/work_test.py | firmware/work_test.py | import hashlib
import binascii
import string
# Sample job fields (hex strings); the names match the Stratum mining
# protocol's mining.notify parameters — presumably taken from a captured
# job, TODO confirm.
prev_hash = "4d16b6f85af6e2198f44ae2a6de67f78487ae5611b77c6c0440b921e00000000"
coinbase1 = "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff20020862062f503253482f04b8864e5008"
coinbase2 = "072f736c7573682f000000000100f2052a010000001976a914d23fcdf86f7e756a64a7a9688ef9903327048ed988ac00000000"
merkle_branch = []
version = "00000002"
nbits = "1c2ac4af"
ntime = "504e86b9"
extra_nonce1 = "55555555"
extra_nonce2 = "aaaaaaaa"
# Full coinbase transaction hex: coinbase1 + both extra nonces + coinbase2.
coinbase = coinbase1 + extra_nonce1 + extra_nonce2 + coinbase2
merkle_branch = [
"1111111111111111111111111111111111111111111111111111111111111111",
"2222222222222222222222222222222222222222222222222222222222222222",
"3333333333333333333333333333333333333333333333333333333333333333",
"4444444444444444444444444444444444444444444444444444444444444444",
"5555555555555555555555555555555555555555555555555555555555555555",
]
#print len(coinbase1)
#print coinbase,len(coinbase)
# Double SHA-256 of the raw coinbase bytes (Bitcoin-style transaction hash).
coinbase_hash_bin = hashlib.sha256(hashlib.sha256(binascii.unhexlify(coinbase)).digest()).digest()
#print binascii.hexlify(coinbase_hash_bin)
def build_merkle_root(merkle_branch, coinbase_hash_bin):
    """Fold the merkle branch into the coinbase hash.

    Each step concatenates the running root with the next branch node
    (given as a hex string) and applies Bitcoin's double SHA-256.
    Returns the final root as hex bytes.
    """
    def _double_sha256(payload):
        return hashlib.sha256(hashlib.sha256(payload).digest()).digest()
    root = coinbase_hash_bin
    for node_hex in merkle_branch:
        root = _double_sha256(root + binascii.unhexlify(node_hex))
    return binascii.hexlify(root)
print build_merkle_root(merkle_branch,coinbase_hash_bin)
| unlicense | Python | |
544907c4222a7c0aead8d35893956ecca79056ba | Add fish-chips example | mstruijs/neural-demos,mstruijs/neural-demos | fish-chips-ketchup.py | fish-chips-ketchup.py | import numpy as np
import random
from neupy import layers, algorithms,plots, init
from neupy.layers.utils import iter_parameters
# Define network: 3 inputs feeding one linear output unit, weights
# initialised to zero and no bias term.
network = layers.Input(3) > layers.Linear(1,weight=init.Constant(0),bias=None)
# Toggle some debug printing.
verbose = True
if verbose:
    # show network details
    for layer, attrname, parameter in iter_parameters(network):
        # parameter is shared Theano variable
        parameter_value = parameter.get_value()
        print("Layer: {}".format(layer))
        print("Parameter name: {}".format(attrname))
        print("Parameter shape: {}".format(parameter_value.shape))
        print()
# Construct network with learning rule to get adaline: gradient descent on
# a linear activation function means the delta rule is used.
adaline = algorithms.GradientDescent(
    network,
    step=0.001,
    show_epoch=100,
    verbose=verbose
)
def random_train_adaline(values, buy_limit, samples, plot=True, epochs=3000,epsilon=0.001):
    '''
    Trains the adaline on randomly generated baskets: each sample draws a
    random quantity of every product and the supervised target is the exact
    total cost of that basket.

    values : the price of each product
    buy_limit : maximum amount bought of every product for a sample
    samples : the number of random samples generated
    plot : show the error plot. Might require the appropriate visualisation program.
    epochs : the number of iterations used to train the adaline. Will stop earlier if it has converged or convergence is impossible
    epsilon : Epsilon used for the delta rule
    '''
    # Draw `samples` random baskets, one quantity in [0, buy_limit] per product.
    random_input = [[random.randint(0,buy_limit) for y in range(0,len(values))] for x in range(0,samples)]
    # Compute the total cost of every basket (the training target).
    output = [sum([sample[i]*values[i] for i in range(0,len(values))]) for sample in random_input]
    print(random_input)
    print(output)
    training_data = np.array(random_input)
    output_data = np.array(output)
    adaline.train(training_data, output_data,epochs=epochs,epsilon=epsilon)
    if plot:
        plots.error_plot(adaline)
if __name__ == "__main__":
    # Demo: learn unit prices (fish=5, chips=3, ketchup=1) from 10 random
    # baskets, then query each one-hot basket to recover the learned price.
    random_train_adaline([5,3,1], 10, 10)
    print(adaline.predict([[1,0,0]]))
    print(adaline.predict([[0,1,0]]))
    # BUG FIX: this line carried a trailing " | mit | Python |" export
    # artifact that made it a syntax error; removed.
    print(adaline.predict([[0,0,1]]))
5fccf078c654b69344ded47c2f4c7abddbd52c4d | Add initial profiles | PyCQA/isort,PyCQA/isort | isort/profiles.py | isort/profiles.py | """Common profiles are defined here to be easily used within a project using --profile {name}"""
# Each profile is a bundle of isort settings applied via --profile {name}.
black = {
    "multi_line_output": 3,
    "include_trailing_comma": True,
    "force_grid_wrap": 0,
    "use_parentheses": True,
}
django = {
    "combine_as_imports": True,
    "include_trailing_comma": True,
    "multi_line_output": 5,
    "line_length": 79,
}
pycharm = {"multi_line_output": 3, "force_grid_wrap": 2}
google = {"force_single_line": True, "force_sort_within_sections": True, "lexicographical": True}
open_stack = {
    "force_single_line": True,
    "force_sort_within_sections": True,
    "lexicographical": True,
}
plone = {
    "force_alphabetical_sort": True,
    "force_single_line": True,
    # BUG FIX: was "ines_after_imports" — a typo isort would silently ignore.
    "lines_after_imports": 2,
    "line_length": 200,
}
attrs = {
    "atomic": True,
    "force_grid_wrap": 0,
    "include_trailing_comma": True,
    "lines_after_imports": 2,
    "lines_between_types": 1,
    "multi_line_output": 3,
    "not_skip": "__init__.py",
    "use_parentheses": True,
}
hug = {
    "multi_line_output": 3,
    "include_trailing_comma": True,
    "force_grid_wrap": 0,
    "use_parentheses": True,
    "line_length": 100,
}
# Registry consulted when resolving --profile {name}.
profiles = {
    "black": black,
    "django": django,
    "pycharm": pycharm,
    "google": google,
    "open_stack": open_stack,
    "plone": plone,
    "attrs": attrs,
    "hug": hug,
}
| mit | Python | |
a798b23d8dfbf8b415a85a04666466d1605208b5 | add celery app file | praekelt/molo-tuneme,praekelt/molo-tuneme,praekelt/molo-tuneme,praekelt/molo-tuneme | tuneme/celery.py | tuneme/celery.py | from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
                      'tuneme.settings.production')
# NOTE(review): app name 'proj' looks like boilerplate from the Celery docs;
# consider renaming to match the project.
app = Celery('proj')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
# Discover tasks modules in every installed Django app.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| bsd-2-clause | Python | |
192012b57cddec724f73f6ab031b13862db134ef | Create twitterstream.py | jigarkb/Twitter-Sentiment-Analysis | twitterstream.py | twitterstream.py | import oauth2 as oauth
import urllib2 as urllib
# SECURITY NOTE: real OAuth credentials are hard-coded below.  They should
# live outside the source (environment/config) and these should be revoked.
access_token_key = "105782391-kfbeApbulMptbrV9w6Bfh5MHrAiUUqqmd1xmD2az"
access_token_secret = "fuIwYDPiqTFvfI4jPJVbCWxZBZaL0ESiq4IMD30c1o"
consumer_key = "1ANfOmOIRa4iaJideGYAg"
consumer_secret = "MmrodYL5xcpjFrFcG8Y4CAR5PTYbRXkKWuQgA1bU"
# Set to 1 to enable urllib2 handler debug output.
_debug = 0
oauth_token = oauth.Token(key=access_token_key, secret=access_token_secret)
oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
http_method = "GET"
http_handler = urllib.HTTPHandler(debuglevel=_debug)
https_handler = urllib.HTTPSHandler(debuglevel=_debug)
'''
Construct, sign, and open a twitter request
using the hard-coded credentials above.
'''
def twitterreq(url, method, parameters):
  """Construct, sign and open an OAuth-signed request to *url*.

  method     -- HTTP method, "GET" or "POST"
  parameters -- extra request parameters to include in the signature

  Returns the urllib2 response object.
  """
  # BUG FIX: the original passed the module-global ``http_method`` here and
  # tested it below, silently ignoring the ``method`` argument, so callers
  # could never issue a POST.  (An unused ``headers = req.to_header()``
  # local was also dropped.)
  req = oauth.Request.from_consumer_and_token(oauth_consumer,
                                              token=oauth_token,
                                              http_method=method,
                                              http_url=url,
                                              parameters=parameters)
  req.sign_request(signature_method_hmac_sha1, oauth_consumer, oauth_token)
  if method == "POST":
    encoded_post_data = req.to_postdata()
  else:
    encoded_post_data = None
  # The signed parameters travel in the (re-serialised) URL.
  url = req.to_url()
  opener = urllib.OpenerDirector()
  opener.add_handler(http_handler)
  opener.add_handler(https_handler)
  response = opener.open(url, encoded_post_data)
  return response
def fetchsamples():
url = "https://stream.twitter.com/1/statuses/sample.json"
parameters = []
response = twitterreq(url, "GET", parameters)
for line in response:
print line.strip()
# Stream samples until the connection drops or the user interrupts.
if __name__ == '__main__':
  fetchsamples()
| apache-2.0 | Python | |
7b0e3c030e9cf4b1792218ad555eaeda40432283 | add a parallel version LR's roughly architecture | jasonwbw/ParallelLR | plr/models/parallel_logistic_regression.py | plr/models/parallel_logistic_regression.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
This is a mutil-thread tool to solve common regression problem by logistic regression(hereinafter referred to as LR).
"""
from abc import ABCMeta, abstractmethod
from logistic_regression import LogisticRegression
class MatrixSpliter(object):
    """Partitions the training data (X, Y) into sub-matrices for workers.

    Still a TODO stub.  NOTE(review): the class name is misspelled
    ("Spliter" vs "Splitter"); kept since callers may already depend on it.
    """
    def __init__(self, X, Y, m, n):
        # TODO: store X/Y and the m-by-n partition geometry.
        pass
    def get_submatrix(self, m, n):
        # TODO: comment and complete the matrix split, get the m line n column sub matrix
        pass
class CombineDt(object):
    """Accumulator for per-node ``Dt`` partial results.

    Worker threads deposit the ``Dt`` computed from their (m, n) sub-matrix
    via :meth:`add`; :meth:`combine` merges them into the final result.
    The bodies are still TODO stubs.
    """
    def __init__(self, m, n):
        # TODO: prepare storage for an m-by-n grid of partial results.
        pass
    def add(self, m, n, Dt):
        # BUG FIX: ``self`` was missing from the signature, so calling
        # ``instance.add(m, n, Dt)`` raised TypeError.
        # TODO: record the Dt computed for the (m, n) sub-matrix.
        pass
    def combine(self):
        # BUG FIX: ``self`` was missing from the signature.
        # TODO: merge all recorded partial results into the final Dt.
        pass
class ParallelLogisticRegression(LogisticRegression):
    """
    An abstract class for an easy-to-use multi-thread parallel LR model.
    """
    __metaclass__ = ABCMeta
    def train(self, X, Y):
        # TODO: split (X, Y), fan work out to threads via node_compute()
        # and merge the partial results (see MatrixSpliter / CombineDt).
        pass
    @abstractmethod
    def node_compute(self, X, Y):
        """
        Called on a worker thread to compute Dt for one sub-matrix.

        Args:
            X : matrix holding one sample x per row
            Y : one-dimensional vector with the target y for the x in the same row of X

        Returns:
            the Dt computed from the given sub-matrix
        """
        pass
| apache-2.0 | Python | |
f34887b247352d378cb60ceafadec80a17d342f2 | Add LXDDriver config test | tpouyer/nova-lxd,Saviq/nova-compute-lxd,tpouyer/nova-lxd,Saviq/nova-compute-lxd | nclxd/tests/test_driver_api.py | nclxd/tests/test_driver_api.py | # Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from nova import test
from nova.virt import fake
from oslo_config import cfg
from nclxd.nova.virt.lxd import driver
class LXDTestConfig(test.NoDBTestCase):
    """Checks the default values of the [lxd] oslo.config options."""
    def test_config(self):
        self.assertIsInstance(driver.CONF.lxd, cfg.ConfigOpts.GroupAttr)
        self.assertEqual(os.path.abspath('/var/lib/lxd'),
                         os.path.abspath(driver.CONF.lxd.root_dir))
        self.assertEqual(5, driver.CONF.lxd.timeout)
        self.assertEqual('nclxd-profile', driver.CONF.lxd.default_profile)
class LXDTestDriver(test.NoDBTestCase):
    """Exercises LXDDriver behaviour that needs no database."""
    def setUp(self):
        super(LXDTestDriver, self).setUp()
        self.connection = driver.LXDDriver(fake.FakeVirtAPI())
    def test_capabilities(self):
        # The LXD driver advertises none of these optional capabilities.
        self.assertFalse(self.connection.capabilities['has_imagecache'])
        self.assertFalse(self.connection.capabilities['supports_recreate'])
        self.assertFalse(
            self.connection.capabilities['supports_migrate_to_same_host'])
| # Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import test
from nova.virt import fake
from oslo_config import cfg
from nclxd.nova.virt.lxd import driver
# Module-level config handle (not referenced within this chunk).
CONF = cfg.CONF
class LXDTestDriver(test.NoDBTestCase):
    """Exercises LXDDriver behaviour that needs no database."""
    def setUp(self):
        super(LXDTestDriver, self).setUp()
        self.connection = driver.LXDDriver(fake.FakeVirtAPI())
    def test_capabilities(self):
        # The LXD driver advertises none of these optional capabilities.
        self.assertFalse(self.connection.capabilities['has_imagecache'])
        self.assertFalse(self.connection.capabilities['supports_recreate'])
        self.assertFalse(
            self.connection.capabilities['supports_migrate_to_same_host'])
| apache-2.0 | Python |
c4eb8c9c7974335cd19f55c2e923a4aac54b3fe2 | add the init file for experiements | okkhoy/minecraft-rl | experiments/__init__.py | experiments/__init__.py | __all__ = ["episodic"]
| mit | Python | |
d7ac57998cde8b3778aa53b6e4a378d67fb5eccf | Create find-links_emails.py | frainfreeze/studying,frainfreeze/studying,frainfreeze/studying,frainfreeze/studying,frainfreeze/studying,frainfreeze/studying,frainfreeze/studying,frainfreeze/studying | python/find-links_emails.py | python/find-links_emails.py | import requests
import re
# get url
url = input('Enter a URL (include `http://`): ')
# connect to the url
website = requests.get(url)
# read html
html = website.text
# Grab every double-quoted http/https/ftp/ftps link.  FIX: raw strings for
# the regexes — the second pattern's \w and \. are invalid escapes in a
# plain string literal and warn on modern Python.
links = re.findall(r'"((http|ftp)s?://.*?)"', html)
# Rough e-mail matcher; note [\w\.,] also admits commas, so expect noise.
emails = re.findall(r'([\w\.,]+@[\w\.,]+\.\w+)', html)
# print the number of links in the list
print("\nFound {} links".format(len(links)))
for email in emails:
    print(email)
| mit | Python | |
b5e4805d07ad524a9d2c452780f5f360a068ce90 | add linked list cycle | SakiFu/leetcode | python/linked_list_cycle.py | python/linked_list_cycle.py | """
Given a linked list, determine if it has a cycle in it.
Follow up:
Can you solve it without using extra space?
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    """Floyd's tortoise-and-hare cycle detection for a singly linked list."""
    def hasCycle(self, head):
        """Return True if the list starting at *head* contains a cycle.

        O(n) time and O(1) extra space: the fast pointer advances two nodes
        per step and the slow pointer one; they can only meet if the list
        loops back on itself.
        """
        if not head:
            return False
        slow = head
        fast = head
        while fast and fast.next:
            fast = fast.next.next
            slow = slow.next
            if fast == slow:
                return True
        # FIX: the original final line carried a trailing " | mit | Python |"
        # export artifact that made it a syntax error; removed.
        return False
13b3320399e51bbbb4018ea5fb3ff6d63c8864c7 | Add quick run script for celex fullscale tests | jacobkrantz/ProbSyllabifier | celexRunScript.py | celexRunScript.py | from celex import Celex
'''
- Allows for a quick run of a desired size pulled from any evolution file.
- Automatically loads the best chromosome from the evolution to test.
- Useful for running large-scale tests of a chromosome that appears
to be performing well.
- For small tests to ensure system functionality, run the unit tests (see README.md)
- is thread-safe.
Set parameters here:
'''
# ---- parameters -------------------------------------------------------
evoFileLocation = "./GeneticAlgorithm/EvolutionLogs/Archive/1/evo309.log"
trainingSize = 500
testingSize = 25
# -----------------------------------------------------------------------
# The first line of the evolution log holds the best chromosome.
with open(evoFileLocation, 'r') as evo:
    bestEvoRaw = evo.readline().split()
# Each whitespace-separated token is a category; split it into its
# individual characters.  FIX: the original used the identity
# ``map(lambda x: x, category)``; ``list(category)`` is equivalent on
# Python 2 and also materialises correctly on Python 3.
transcriptionScheme = []
for category in bestEvoRaw:
    transcriptionScheme.append(list(category))
# Train an HMM with the selected scheme, then measure syllabification
# accuracy on the held-out test set.
c = Celex()
c.loadSets(trainingSize, testingSize)
GUID = c.trainHMM(transcriptionScheme)
percentSame = c.testHMM(transcriptionScheme, GUID)
| mit | Python | |
26040edd17b7b1eff3317bbc87aa4548fe27aefa | reset random seed before each scenario | mode89/snn,mode89/snn,mode89/snn | features/environment.py | features/environment.py | import random
def before_scenario(context, scenario):
    """Behave-style hook: reset the RNG before every scenario so runs are
    deterministic."""
    random.seed(0)
| mit | Python | |
ec6d4042f13a641975bbcd2598e76b0cefef0b54 | add extractor.py | edonyM/toolkitem,edonyM/toolkitem,edonyM/toolkitem | fileparser/extractor.py | fileparser/extractor.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2015-05-28 22:20
#
# Filename: extractor.py
#
# Description: All Rights Are Reserved
#
# ******
# Extract the specific content from text with the given keys.
"""
import os
import re
class Extractor(object):
    """
    Extract the specific content from text with the given keys.
    """
    def __init__(self, keys, extracted_file):
        """Store the search keys and the file the content is extracted from.

        keys           -- the keys used to locate the content to extract
        extracted_file -- the file whose text will be searched
        """
        self.keys = keys
        # BUG FIX: the original line read ``self.extracted_file`` — a bare
        # attribute access that raised AttributeError on a fresh instance
        # and never stored the argument.
        self.extracted_file = extracted_file
| mit | Python | |
c839d8ce8fb6a1f7c7ab0acc3a8840bb3569d27d | Add merge migration | swcarpentry/amy,pbanaszkiewicz/amy,vahtras/amy,pbanaszkiewicz/amy,swcarpentry/amy,wking/swc-amy,vahtras/amy,shapiromatron/amy,shapiromatron/amy,pbanaszkiewicz/amy,swcarpentry/amy,wking/swc-amy,shapiromatron/amy,vahtras/amy,wking/swc-amy,wking/swc-amy | workshops/migrations/0015_merge.py | workshops/migrations/0015_merge.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Merge migration: reconciles the '0011_person_badges' and '0014_merge'
    # branches of the workshops migration graph. It intentionally performs
    # no schema changes of its own, hence the empty operations list.
    dependencies = [
        ('workshops', '0011_person_badges'),
        ('workshops', '0014_merge'),
    ]
    operations = [
    ]
| mit | Python | |
aa7f40c4451fa5816d456012aec94dff83b784b0 | Add Actual Converter | SinisterSoda/simpleconverter | flac_to_alac_convert.py | flac_to_alac_convert.py | import os
import subprocess
#this is the default folder it traverses
default = r'C:\Users\Mikey\Music\The_Strokes'
print ("Default Directory is: " + default)
rootdir = input("Input the Directory (Or leave blank for default directory): ")
#this is the directory where ffmpeg is located
ffmpeg = "C:\\Users\\Mikey\\Desktop\\ffmpeg\\bin\\ffmpeg"
i = 0
#will set the folder to default if no folder is input by the user
if rootdir == "":
    rootdir = default
print("Traversing directory: " + rootdir)
#loop that traverses the folder iteratively
# ffmpeg's stdout is redirected into this scratch file instead of the console
outputfile = open(r'C:\Users\Mikey\Desktop\temp.txt','w')
for subdir, dirs, files in os.walk(rootdir):
    #goes through each file in the subfolder
    print("Going through directory: " + subdir)
    for file in files:
        # NOTE(review): every file in the tree is converted, not just *.flac —
        # an extension check is probably intended here; confirm.
        f = os.path.join(subdir, file)
        #get the base name of the file without extension
        basef = os.path.splitext(f)[0]
        print("Converting: " + f)
        #the ffmpeg command that actually does the converting
        # NOTE(review): shell=True with string-interpolated paths breaks on
        # filenames containing quotes; an argv list would be safer.
        command = ffmpeg + " -i \"" + f + "\" -acodec alac \"" + basef + ".m4a\""
        print("Running command: ")
        print(command)
        #runs the actual command
        subprocess.call(command, shell=True, stdout=outputfile)
        i+=1
        print("Converted file # " + str(i))
outputfile.close()
print ("A total of " + str(i) + " files have been converted")
| mit | Python | |
25a4c9ba978aef7f648904c654fcc044f429acd4 | Add Subjects model, methods for report and export | qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq | custom/openclinica/models.py | custom/openclinica/models.py | from collections import defaultdict
from corehq.apps.users.models import CouchUser
from custom.openclinica.const import AUDIT_LOGS
from custom.openclinica.utils import (
OpenClinicaIntegrationError,
is_item_group_repeating,
is_study_event_repeating,
get_item_measurement_unit,
get_question_item,
get_oc_user,
get_study_event_name,
)
class Subject(object):
    """
    Manages data for a subject case
    """
    def __init__(self, subject_key, study_subject_id, domain):
        # OpenClinica identifiers for this subject.
        self.subject_key = subject_key
        self.study_subject_id = study_subject_id
        # We need the domain to get study metadata for study events and item groups
        self._domain = domain
        # This subject's data. Stored as subject[study_event_oid][i][form_oid][item_group_oid][j][item_oid]
        # (Study events and item groups are lists because they can repeat.)
        self.data = {}
    def get_report_events(self):
        """
        The events as they appear in the report.
        These are useful for scheduling events in OpenClinica, which cannot be imported from ODM until they have
        been scheduled.
        """
        # NOTE: Python 2 code (`itervalues`). Assumes each study_event exposes
        # name/start_short/end_short attributes — confirm against the type
        # used to populate self.data.
        events = []
        for study_events in self.data.itervalues():
            for study_event in study_events:
                events.append(
                    '"{name}" ({start} - {end})'.format(
                        name=study_event.name,
                        start=study_event.start_short,
                        end=study_event.end_short))
        return ', '.join(events)
    def get_export_data(self):
        """
        Transform Subject.data into the structure that CdiscOdmExportWriter expects
        """
        # Items: merge each item dict with its OID under the 'item_oid' key.
        mkitemlist = lambda d: [dict(v, item_oid=k) for k, v in d.iteritems()] # `dict()` updates v with item_oid
        def mkitemgrouplist(itemgroupdict):
            # Item groups can repeat; 'repeat_key' is 1-based per OID.
            itemgrouplist = []
            for oid, item_groups in itemgroupdict.iteritems():
                for i, item_group in enumerate(item_groups):
                    itemgrouplist.append({
                        'item_group_oid': oid,
                        'repeat_key': i + 1,
                        'items': mkitemlist(item_group.items)
                    })
            return itemgrouplist
        mkformslist = lambda d: [{'form_oid': k, 'item_groups': mkitemgrouplist(v)} for k, v in d.iteritems()]
        def mkeventslist(eventsdict):
            # Study events can also repeat; carry their long-format
            # start/end timestamps through to the export structure.
            eventslist = []
            for oid, study_events in eventsdict.iteritems():
                for i, study_event in enumerate(study_events):
                    eventslist.append({
                        'study_event_oid': oid,
                        'repeat_key': i + 1,
                        'start_long': study_event.start_long,
                        'end_long': study_event.end_long,
                        'forms': mkformslist(study_event.forms)
                    })
            return eventslist
        return mkeventslist(self.data)
| bsd-3-clause | Python | |
1068a19af6fc6c5ec7be8d59cc4bb1d76eb40bc7 | add homeassistant-entrypoint.py | tonyldo/home-automation-mqtt-hass,tonyldo/home-automation-mqtt-hass | homeassistant-entrypoint.py | homeassistant-entrypoint.py | import os
import argparse
import homeassistant.config as config_util
from homeassistant.const import (
__version__,
EVENT_HOMEASSISTANT_START,
REQUIRED_PYTHON_VER,
RESTART_EXIT_CODE,
)
def get_arguments() -> argparse.Namespace:
    """Get parsed passed in arguments."""
    parser = argparse.ArgumentParser(
        description="Home Assistant: Observe, Control, Automate.")
    parser.add_argument('--version', action='version', version=__version__)
    parser.add_argument(
        '-c', '--config',
        metavar='path_to_config_dir',
        default=config_util.get_default_config_dir(),
        help="Directory that contains the Home Assistant configuration")
    # --daemon is only offered on POSIX systems.
    if os.name == "posix":
        parser.add_argument(
            '--daemon',
            action='store_true',
            help='Run Home Assistant as daemon')
    arguments = parser.parse_args()
    return arguments
# Resolve the config directory relative to the current working directory and
# report the config file. NOTE(review): presumably ensure_config_exists
# creates the file when missing — confirm in homeassistant.config.
args = get_arguments()
config_dir = os.path.join(os.getcwd(), args.config)
print('Config File:', config_util.ensure_config_exists(config_dir))
| mit | Python | |
8ed7c6d43bcef1543cd6e85a147051fde29b7580 | Add new language distance logic | LuminosoInsight/langcodes | langcodes/language_distance.py | langcodes/language_distance.py | from .data_dicts import LANGUAGE_DISTANCES
_DISTANCE_CACHE = {}
DEFAULT_LANGUAGE_DISTANCE = LANGUAGE_DISTANCES['*']['*']
DEFAULT_SCRIPT_DISTANCE = LANGUAGE_DISTANCES['*_*']['*_*']
DEFAULT_TERRITORY_DISTANCE = 4
# Territory clusters used in territory matching:
# Maghreb (the western Arab world)
MAGHREB = {'MA', 'DZ', 'TN', 'LY', 'MR', 'EH'}
# United States and its territories
US = {'AS', 'GU', 'MH', 'MP', 'PR', 'UM', 'US', 'VI'}
# Special Autonomous Regions of China
CNSAR = {'HK', 'MO'}
# North and South America
AMERICAS = {
# Caribbean
'AI', 'AG', 'AW', 'BS', 'BB', 'VG', 'BQ', 'KY', 'CU', 'CW', 'DM', 'DO',
'GD', 'GP', 'HT', 'JM', 'MQ', 'MS', 'PR', 'SX', 'BL', 'KN', 'LC', 'MF',
'VC', 'TT', 'TC', 'VI',
# Central America
'BZ', 'CR', 'SV', 'GT', 'HN', 'MX', 'NI', 'PA',
# North America
'BM', 'CA', 'GL', 'PM', 'US',
# South America
'AR', 'BO', 'BR', 'CL', 'CO', 'EC', 'FK', 'GF', 'GY', 'PY', 'PE', 'SR',
'UY', 'VE',
}
def tuple_distance_cached(desired: tuple, supported: tuple):
    """Distance between two maximized (language, script, territory) triples.

    Identical triples short-circuit to 0 without touching the cache; all
    other results are memoized in the module-level _DISTANCE_CACHE.
    """
    if desired == supported:
        return 0

    key = (desired, supported)
    try:
        return _DISTANCE_CACHE[key]
    except KeyError:
        result = _DISTANCE_CACHE[key] = _tuple_distance(desired, supported)
        return result
def _get2(dictionary: dict, key1: str, key2: str, default):
return dictionary.get(key1, {}).get(key2, default)
def _tuple_distance(desired: tuple, supported: tuple):
    """Match distance between two maximized (language, script, territory) triples.

    Sums a language-level, script-level, and territory-level penalty using
    the CLDR-derived LANGUAGE_DISTANCES tables plus the hand-coded territory
    cluster rules (MAGHREB, US, CNSAR, AMERICAS module constants).
    """
    desired_language, desired_script, desired_territory = desired
    supported_language, supported_script, supported_territory = supported
    distance = 0

    if desired_language != supported_language:
        distance += _get2(LANGUAGE_DISTANCES, desired_language, supported_language, DEFAULT_LANGUAGE_DISTANCE)

    desired_script_pair = '{}_{}'.format(desired_language, desired_script)
    supported_script_pair = '{}_{}'.format(supported_language, supported_script)

    if desired_script != supported_script:
        # Scripts can match other scripts, but only when paired with a language. For example,
        # there is no reason to assume someone who can read 'Latn' can read 'Cyrl', but there
        # is plenty of reason to believe someone who can read 'sr-Latn' can read 'sr-Cyrl'
        # because Serbian is a language written in two scripts.
        distance += _get2(LANGUAGE_DISTANCES, desired_script_pair, supported_script_pair, DEFAULT_SCRIPT_DISTANCE)

    if desired_territory != supported_territory:
        # The rules for matching territories are too weird to implement the general case
        # efficiently. Instead of implementing all the possible match rules the XML could define,
        # instead we just reimplement the rules of CLDR 36.1 here in code.
        tdist = DEFAULT_TERRITORY_DISTANCE
        if desired_script_pair == supported_script_pair:
            if desired_language == 'ar':
                if (desired_territory in MAGHREB) != (supported_territory in MAGHREB):
                    tdist = 5
            elif desired_language == 'en':
                if (desired_territory == 'GB') and (supported_territory not in US):
                    tdist = 3
                elif (desired_territory not in US) and (supported_territory == 'GB'):
                    tdist = 3
                elif (desired_territory in US) != (supported_territory in US):
                    tdist = 5
            elif desired_language == 'es' or desired_language == 'pt':
                if (desired_territory in AMERICAS) != (supported_territory in AMERICAS):
                    tdist = 5
            # Bug fix: the original compared the undefined name
            # `desired_language_pair`, raising NameError whenever a zh-Hant
            # pair reached this branch; the language+script pair was intended.
            elif desired_script_pair == 'zh_Hant':
                if (desired_territory in CNSAR) != (supported_territory in CNSAR):
                    tdist = 5
        distance += tdist
    return distance
| mit | Python | |
9d2a2b0e1f066b2606e62ec019b56d4659ed86b1 | Add cluster evaluation: adjusted rand index | studiawan/pygraphc | pygraphc/clustering/ClusterEvaluation.py | pygraphc/clustering/ClusterEvaluation.py | from sklearn import metrics
class ClusterEvaluation(object):
    """Helpers for scoring cluster assignments stored in ';'-delimited files."""

    @staticmethod
    def get_evaluated(evaluated_file):
        """Read a result file and return the label (first ';' field) of each line."""
        labels = []
        with open(evaluated_file, 'r') as handle:
            for line in handle:
                labels.append(line.split(';')[0])
        return labels

    @staticmethod
    def get_adjusted_rand_score(standard_file, prediction_file):
        """Adjusted Rand index between ground-truth and predicted label files."""
        truth = ClusterEvaluation.get_evaluated(standard_file)
        predicted = ClusterEvaluation.get_evaluated(prediction_file)
        return metrics.adjusted_rand_score(truth, predicted)
b4b8f3554ae569ed42ba261de89e2d186418e71e | add html helper. | why2pac/dp-tornado,why2pac/dp-tornado,why2pac/dp-tornado,why2pac/dp-tornado | dp_tornado/helper/html.py | dp_tornado/helper/html.py | # -*- coding: utf-8 -*-
from dp_tornado.engine.helper import Helper as dpHelper
try:
# py 2.x
import HTMLParser
html_parser = HTMLParser.HTMLParser()
except:
# py 3.4-
try:
import html.parser
html_parser = html.parser.HTMLParser()
except:
# py 3.4+
import html as html_parser
try:
import htmltag
except:
htmltag = None
import re
class HtmlHelper(dpHelper):
    """dp_tornado helper exposing HTML sanitising and decoding utilities."""
    def strip_xss(self, html, whitelist=None, replacement='entities'):
        """Strip XSS-capable markup from `html` via htmltag.strip_xss.

        :param whitelist: tags to keep; defaults to a safe formatting subset.
        :param replacement: passed through to htmltag — presumably controls
            how disallowed markup is handled; confirm against htmltag docs.
        :raises Exception: if the optional `htmltag` dependency is missing.
        """
        if not htmltag:
            raise Exception('htmltag library required.')
        if whitelist is None:
            whitelist = (
                'a', 'abbr', 'aside', 'audio', 'bdi', 'bdo', 'blockquote', 'canvas',
                'caption', 'code', 'col', 'colgroup', 'data', 'dd', 'del',
                'details', 'div', 'dl', 'dt', 'em', 'figcaption', 'figure', 'h1',
                'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins', 'kbd', 'li',
                'mark', 'ol', 'p', 'pre', 'q', 'rp', 'rt', 'ruby', 's', 'samp',
                'small', 'source', 'span', 'strong', 'sub', 'summary', 'sup',
                'table', 'td', 'th', 'time', 'tr', 'track', 'u', 'ul', 'var',
                'video', 'wbr', 'b')
        return htmltag.strip_xss(html, whitelist, replacement)
    def strip_tags(self, text):
        """Remove anything that looks like an HTML/XML tag from `text`."""
        return re.sub('<[^<]+?>', '', text)
    def entity_decode(self, text):
        # NOTE(review): HTMLParser.unescape was removed in Python 3.9; only
        # the `import html as html_parser` fallback works on modern Pythons.
        return html_parser.unescape(text)
| mit | Python | |
f06e5a8c701f06d40597cd268a6739988c2fff56 | Add functions for parsing log file | dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq | corehq/apps/cleanup/tasks.py | corehq/apps/cleanup/tasks.py | import os
from collections import defaultdict
from django.conf import settings
from django.core.management import call_command
from celery.schedules import crontab
from celery.task import periodic_task
from datetime import datetime
from corehq.apps.cleanup.management.commands.fix_xforms_with_undefined_xmlns import \
parse_log_message, ERROR_SAVING, SET_XMLNS, MULTI_MATCH
def get_summary_stats_from_stream(stream):
    """Aggregate fix-xforms log lines into per-domain summary counters.

    :param stream: iterable of log lines parseable by parse_log_message
    :return: dict with per-domain counts ('fixed', 'not_fixed', 'errors'),
        per-domain sets of offending usernames, and the set of
        (domain, build_id) pairs that had ambiguous multi-matches.
    """
    summary = {
        # A dictionary like: {
        #     "foo-domain": 7,
        #     "bar-domain": 3,
        # }
        'not_fixed': defaultdict(lambda: 0),
        'fixed': defaultdict(lambda: 0),
        'errors': defaultdict(lambda: 0),
        'submitting_bad_forms': defaultdict(set),
        'multi_match_builds': set(),
    }
    for line in stream:
        level, event, extras = parse_log_message(line)
        domain = extras.get('domain', '')
        if event == ERROR_SAVING:
            # Bug fix: the original did `summary['errors'] += 1`, which adds
            # an int to a defaultdict (TypeError). pprint_stats reads both
            # stats['errors'].values() and stats['errors'][domain], so this
            # must be a per-domain counter.
            summary['errors'][domain] += 1
        elif event == SET_XMLNS or event == MULTI_MATCH:
            summary['submitting_bad_forms'][domain].add(
                extras.get('username', '')
            )
            if event == SET_XMLNS:
                summary['fixed'][domain] += 1
            if event == MULTI_MATCH:
                summary['not_fixed'][domain] += 1
                summary['multi_match_builds'].add(
                    (domain, extras.get('build_id', ''))
                )
    return summary
def pprint_stats(stats, outstream):
    """Write a human-readable summary of a stats dict to *outstream*.

    Expects the structure produced by get_summary_stats_from_stream.
    """
    lines = [
        "Number of errors: {}\n".format(sum(stats['errors'].values())),
        "Number of xforms that we could not fix: {}\n".format(sum(stats['not_fixed'].values())),
        "Number of xforms that we fixed: {}\n".format(sum(stats['fixed'].values())),
        "Domains and users that submitted bad xforms:\n",
    ]
    for domain in sorted(stats['submitting_bad_forms']):
        lines.append(
            "    {} ({} fixed, {} not fixed, {} errors)\n".format(
                domain, stats['fixed'][domain], stats['not_fixed'][domain], stats['errors'][domain]
            )
        )
        lines.extend(
            "        {}\n".format(user)
            for user in sorted(stats['submitting_bad_forms'][domain])
        )
    outstream.writelines(lines)
| bsd-3-clause | Python | |
095b9cc5f2e9a87220e6f40f88bf6ecd598ca681 | Add abstract box for ComponentSearch so users can drag it into their workflows. | CMUSV-VisTrails/WorkflowRecommendation,CMUSV-VisTrails/WorkflowRecommendation,CMUSV-VisTrails/WorkflowRecommendation | vistrails/packages/componentSearch/init.py | vistrails/packages/componentSearch/init.py | #Copied imports from HTTP package init.py file
from PyQt4 import QtGui
from core.modules.vistrails_module import ModuleError
from core.configuration import get_vistrails_persistent_configuration
from gui.utils import show_warning
import core.modules.vistrails_module
import core.modules
import core.modules.basic_modules
import core.modules.module_registry
import core.system
from core import debug
from component_search_form import *
class ComponentSearch(core.modules.vistrails_module.Module):
    # Placeholder VisTrails module: registered as abstract in initialize()
    # so users can drag a "ComponentSearch" box into workflows; it defines
    # no computation of its own.
    pass
def initialize(*args, **keywords):
    """Package entry point called by VisTrails: register this package's modules.

    Registers ComponentSearch with abstract=True so it appears in the
    palette without being directly executable.
    """
    reg = core.modules.module_registry.get_module_registry()
    # Dropped the unused local alias `basic = core.modules.basic_modules`.
    reg.add_module(ComponentSearch, abstract=True)
| bsd-3-clause | Python | |
c3a9d78ca3ffbad0e11192e896db8cd0c2758154 | UPDATE - add migration file | mingkim/QuesCheetah,mingkim/QuesCheetah,mingkim/QuesCheetah,mingkim/QuesCheetah | vote/migrations/0002_auto_20160315_0006.py | vote/migrations/0002_auto_20160315_0006.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Alters multiquestion.group_name to CharField(max_length=100).
    dependencies = [
        ('vote', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='multiquestion',
            name='group_name',
            field=models.CharField(max_length=100),
        ),
    ]
| mit | Python | |
23ad5049477c272ca1666f90e73246c4de8e5c48 | Add utility functions for processing partitioned objects | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/sql_db/util.py | corehq/sql_db/util.py | from corehq.form_processor.backends.sql.dbaccessors import ShardAccessor
from corehq.sql_db.config import partition_config
from django.conf import settings
def _get_db_alias(partition_value):
    """
    Return the database alias that owns objects partitioned on
    partition_value; falls back to 'default' when partitioning is disabled.
    """
    if settings.USE_PARTITIONED_DATABASE:
        return ShardAccessor.get_database_for_doc(partition_value)
    return 'default'


def get_object_from_partitioned_database(model_class, partition_value, partitioned_field='pk'):
    """
    Determines from which database to retrieve a partitioned model object and
    retrieves it.
    :param model_class: A Django model class
    :param partition_value: The value that is used to partition the model; this
    value will be used to select the database
    :param partitioned_field: The model field on which the object is partitioned; the
    object whose partitioned_field attribute equals partition_value is returned
    :return: The model object
    """
    kwargs = {
        partitioned_field: partition_value,
    }
    return model_class.objects.using(_get_db_alias(partition_value)).get(**kwargs)


def save_object_to_partitioned_database(obj, partition_value):
    """
    Determines to which database to save a partitioned model object and
    saves it there.
    :param obj: A Django model object
    :param partition_value: The value that is used to partition the model; this
    value will be used to select the database
    """
    # NOTE(review): super(obj.__class__, obj).save(...) bypasses any save()
    # override on obj's own class — presumably deliberate here; confirm, and
    # note it misbehaves if a subclass instance is passed.
    super(obj.__class__, obj).save(using=_get_db_alias(partition_value))


def delete_object_from_partitioned_database(obj, partition_value):
    """
    Determines from which database to delete a partitioned model object and
    deletes it there.
    :param obj: A Django model object
    :param partition_value: The value that is used to partition the model; this
    value will be used to select the database
    """
    super(obj.__class__, obj).delete(using=_get_db_alias(partition_value))


def run_query_across_partitioned_databases(model_class, q_expression, values=None):
    """
    Runs a query across all partitioned databases and produces a generator
    with the results.
    :param model_class: A Django model class
    :param q_expression: An instance of django.db.models.Q representing the
    filter to apply
    :param values: (optional) If specified, should be a list of values to retrieve rather
    than retrieving entire objects. If a list with a single value is given, the result will
    be a generator of single values. If a list with multiple values is given, the result
    will be a generator of tuples.
    :return: A generator with the results
    """
    if settings.USE_PARTITIONED_DATABASE:
        db_names = partition_config.get_form_processing_dbs()
    else:
        db_names = ['default']

    if values and not isinstance(values, (list, tuple)):
        raise ValueError("Expected a list or tuple")

    for db_name in db_names:
        qs = model_class.objects.using(db_name).filter(q_expression)
        if values:
            if len(values) == 1:
                qs = qs.values_list(*values, flat=True)
            else:
                qs = qs.values_list(*values)
        for result in qs:
            yield result
| bsd-3-clause | Python | |
b073397008a07e50b79c7cf97d91765de3fa4fed | add daemon example | guixing/opstools | library/daemon.py | library/daemon.py |
import time
import os
import sys
def run():
while True:
print '1time'
time.sleep(1)
def daemon():
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError, e:
print 'fork #1 fail', e
sys.exit(1)
os.chdir('/')
os.setsid()
os.umask(0)
try:
pid = os.fork()
if pid > 0:
print 'daemon pid %d' % pid
sys.exit(0)
except OSError, e:
print 'fork #2 fail', e
sys.exit(1)
nulldev = '/dev/null'
stdin = file(nulldev, 'r')
stdout = file('/tmp/stdout', 'a+', 0)
stderr = file(nulldev, 'a+', 0)
os.dup2(stdin.fileno(), sys.stdin.fileno())
os.dup2(stdout.fileno(), sys.stdout.fileno())
os.dup2(stderr.fileno(), sys.stderr.fileno())
return pid
if __name__ == '__main__':
if len(sys.argv) == 2 and sys.argv[1] == '-d':
daemon()
run()
| mit | Python | |
c9cc92c47192132926660efb3416349a5f946a89 | Test for ConversationMessage serializer | yunity/foodsaving-backend,yunity/foodsaving-backend,yunity/yunity-core,yunity/foodsaving-backend,yunity/yunity-core | yunity/tests/unit/test__api_serializers.py | yunity/tests/unit/test__api_serializers.py | from yunity.api import serializers
from yunity.models import Conversation as ConversationModel
from yunity.utils.tests.abc import BaseTestCase, AnyResult
class TestSerializers(BaseTestCase):
    """Unit tests for yunity.api.serializers."""
    def test_chat_with_empty_messages_serializable(self):
        # A freshly created Conversation has no messages yet; serializing it
        # should still succeed (any result value is accepted via AnyResult).
        self.given_data(ConversationModel.objects.create())
        self.when_calling(serializers.conversation)
        self.then_invocation_passed_with(AnyResult())
| agpl-3.0 | Python | |
bddea45a0ddf4efa911aae8af74fb1c78b91b152 | Create max2.py | HarendraSingh22/Python-Guide-for-Beginners | Code/max2.py | Code/max2.py | #Program to find maximum of 2 numbers
while 1:
n,m=map(int, raw_input())
k = max(m,n)
print k
print "Do you want to continue(yes/no): "
s=raw_input()
if s=="no":
break
| mit | Python | |
25b19d18c75feeb46139898e9a9c270d909e135e | add L4 quiz - Ember Shortcuts and Aliases | udacity/course-front-end-frameworks,udacity/course-front-end-frameworks,udacity/course-front-end-frameworks | lesson4/quizEmberShortcuts/unit_tests.py | lesson4/quizEmberShortcuts/unit_tests.py | is_correct = False
port_alias = widget_inputs["check1"]
new_alias = widget_inputs["check2"]
help_alias = widget_inputs["check3"]
generate_alias = widget_inputs["check4"]
comments = []
def commentizer(new):
    # Append a feedback message only once (dedupe repeated messages).
    if new not in comments:
        comments.append(new)
if not port_alias:
is_correct = is_correct and False
commentizer("Did you look through the Ember-CLI's help information?")
else:
is_correct = True
if new_alias:
is_correct = is_correct and False
commentizer("Hmm, I couldn't find `n` as a shortcut for `new`. Are you sure that's accurate?")
else:
is_correct = is_correct and True
if not help_alias:
is_correct = is_correct and False
commentizer("This one's easy to test; try running `ember --help` and then `ember help` on the command line. Did you get the same output?")
else:
is_correct = is_correct and True
if generate_alias:
is_correct = is_correct and False
commentizer("Generate does have an alias!...but are you sure this is the right one?")
else:
is_correct = is_correct and True
# if they're all unchecked
if not any([port_alias, new_alias, help_alias, generate_alias]):
is_correct = False
comments = []
comments.append("Some, maybe all, of these are correct. Why don't you check the ember website or poke around in the output from the `ember` command to see if you can figure out which one(s) are correct aliases.")
if is_correct:
commentizer("Great job! Aliases are extremely helpful, and professionals use them religiously! If you really want to get better in Ember, learn the aliases so you can be more efficient.")
grade_result["comment"] = "\n\n".join(comments)
grade_result["correct"] = is_correct
| mit | Python | |
9a9ee99129cee92c93fbc9e2cc24b7b933d51aac | Add on_delete in foreign keys. | hackerkid/zulip,dhcrzf/zulip,Galexrt/zulip,showell/zulip,amanharitsh123/zulip,zulip/zulip,vabs22/zulip,vaidap/zulip,jackrzhang/zulip,jrowan/zulip,hackerkid/zulip,zulip/zulip,tommyip/zulip,kou/zulip,jackrzhang/zulip,brockwhittaker/zulip,brockwhittaker/zulip,tommyip/zulip,shubhamdhama/zulip,eeshangarg/zulip,shubhamdhama/zulip,brockwhittaker/zulip,Galexrt/zulip,brainwane/zulip,andersk/zulip,brockwhittaker/zulip,amanharitsh123/zulip,vabs22/zulip,synicalsyntax/zulip,tommyip/zulip,verma-varsha/zulip,kou/zulip,mahim97/zulip,rht/zulip,amanharitsh123/zulip,shubhamdhama/zulip,dhcrzf/zulip,mahim97/zulip,jrowan/zulip,andersk/zulip,timabbott/zulip,eeshangarg/zulip,synicalsyntax/zulip,brainwane/zulip,andersk/zulip,shubhamdhama/zulip,rht/zulip,rishig/zulip,synicalsyntax/zulip,synicalsyntax/zulip,Galexrt/zulip,rishig/zulip,zulip/zulip,jrowan/zulip,verma-varsha/zulip,dhcrzf/zulip,rht/zulip,synicalsyntax/zulip,tommyip/zulip,eeshangarg/zulip,amanharitsh123/zulip,timabbott/zulip,hackerkid/zulip,kou/zulip,andersk/zulip,tommyip/zulip,punchagan/zulip,hackerkid/zulip,vabs22/zulip,mahim97/zulip,punchagan/zulip,verma-varsha/zulip,showell/zulip,vaidap/zulip,jrowan/zulip,mahim97/zulip,rishig/zulip,jackrzhang/zulip,timabbott/zulip,timabbott/zulip,vabs22/zulip,kou/zulip,Galexrt/zulip,brainwane/zulip,tommyip/zulip,tommyip/zulip,rishig/zulip,showell/zulip,shubhamdhama/zulip,verma-varsha/zulip,rishig/zulip,dhcrzf/zulip,Galexrt/zulip,verma-varsha/zulip,Galexrt/zulip,timabbott/zulip,eeshangarg/zulip,jrowan/zulip,brainwane/zulip,zulip/zulip,eeshangarg/zulip,dhcrzf/zulip,rishig/zulip,brockwhittaker/zulip,punchagan/zulip,mahim97/zulip,vabs22/zulip,vabs22/zulip,andersk/zulip,jackrzhang/zulip,vaidap/zulip,eeshangarg/zulip,hackerkid/zulip,jackrzhang/zulip,zulip/zulip,shubhamdhama/zulip,mahim97/zulip,timabbott/zulip,kou/zulip,verma-varsha/zulip,rishig/zulip,hackerkid/zulip,brainwane/zulip,kou/zulip,zulip/zulip,showell/zulip,punchaga
n/zulip,jrowan/zulip,vaidap/zulip,punchagan/zulip,punchagan/zulip,rht/zulip,eeshangarg/zulip,kou/zulip,showell/zulip,punchagan/zulip,shubhamdhama/zulip,rht/zulip,timabbott/zulip,amanharitsh123/zulip,jackrzhang/zulip,andersk/zulip,dhcrzf/zulip,dhcrzf/zulip,rht/zulip,showell/zulip,brainwane/zulip,synicalsyntax/zulip,amanharitsh123/zulip,showell/zulip,jackrzhang/zulip,zulip/zulip,vaidap/zulip,Galexrt/zulip,hackerkid/zulip,vaidap/zulip,andersk/zulip,brockwhittaker/zulip,rht/zulip,brainwane/zulip,synicalsyntax/zulip | confirmation/migrations/0001_initial.py | confirmation/migrations/0001_initial.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration for the confirmation app: creates the Confirmation
    # model, whose target object is referenced generically via
    # content_type + object_id, with an explicit on_delete=CASCADE on the
    # ContentType foreign key.
    dependencies = [
        ('contenttypes', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Confirmation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('object_id', models.PositiveIntegerField()),
                ('date_sent', models.DateTimeField(verbose_name='sent')),
                ('confirmation_key', models.CharField(max_length=40, verbose_name='activation key')),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
            ],
            options={
                'verbose_name': 'confirmation email',
                'verbose_name_plural': 'confirmation emails',
            },
            bases=(models.Model,),
        ),
    ]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Confirmation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField()),
('date_sent', models.DateTimeField(verbose_name='sent')),
('confirmation_key', models.CharField(max_length=40, verbose_name='activation key')),
('content_type', models.ForeignKey(to='contenttypes.ContentType')),
],
options={
'verbose_name': 'confirmation email',
'verbose_name_plural': 'confirmation emails',
},
bases=(models.Model,),
),
]
| apache-2.0 | Python |
b807fe4e5a2120acb93f8f67ba44fa3c6b7ce92f | Add a benchmark for observation speeds. | deepmind/pysc2 | pysc2/bin/benchmark_observe.py | pysc2/bin/benchmark_observe.py | #!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark observation times."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import app
from absl import flags
from future.builtins import range # pylint: disable=redefined-builtin
from pysc2 import maps
from pysc2 import run_configs
from pysc2.lib import point_flag
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
flags.DEFINE_bool("raw", False, "Enable raw rendering")
point_flag.DEFINE_point("feature_size", "64", "Resolution for feature layers.")
point_flag.DEFINE_point("rgb_size", "64", "Resolution for rgb observations.")
FLAGS = flags.FLAGS
def main(unused_argv):
  """Run a Simple64 game vs an easy bot and time each observe() call."""
  interface = sc_pb.InterfaceOptions()
  interface.score = True
  interface.raw = FLAGS.raw
  if FLAGS.feature_size:
    # Feature-layer rendering: fixed world width, flag-controlled resolutions.
    interface.feature_layer.width = 24
    FLAGS.feature_size.assign_to(interface.feature_layer.resolution)
    FLAGS.feature_size.assign_to(interface.feature_layer.minimap_resolution)
  if FLAGS.rgb_size:
    FLAGS.rgb_size.assign_to(interface.render.resolution)
    FLAGS.rgb_size.assign_to(interface.render.minimap_resolution)
  timeline = []  # per-step observation latencies, in seconds
  try:
    run_config = run_configs.get()
    with run_config.start() as controller:
      map_inst = maps.get("Simple64")
      # Fixed random seed so repeated benchmark runs see the same game.
      create = sc_pb.RequestCreateGame(
          realtime=False, disable_fog=False, random_seed=1,
          local_map=sc_pb.LocalMap(map_path=map_inst.path,
                                   map_data=map_inst.data(run_config)))
      create.player_setup.add(type=sc_pb.Participant)
      create.player_setup.add(type=sc_pb.Computer, race=sc_common.Terran,
                              difficulty=sc_pb.VeryEasy)
      join = sc_pb.RequestJoinGame(race=sc_common.Random, options=interface)
      controller.create_game(create)
      controller.join_game(join)
      for _ in range(500):
        controller.step()
        start = time.time()
        obs = controller.observe()
        timeline.append(time.time() - start)
        if obs.player_result:
          break  # game ended before the 500-step budget
  except KeyboardInterrupt:
    pass  # Ctrl-C just stops collecting; partial timings still print
  print("Timeline:")
  for t in timeline:
    print(t * 1000)  # milliseconds
if __name__ == "__main__":
  app.run(main)
| apache-2.0 | Python | |
46fdde37823c1c52bd78fc0922ec97a832fc191e | add 031 | ufjfeng/leetcode-jf-soln,ufjfeng/leetcode-jf-soln | python/031_next_permutation.py | python/031_next_permutation.py | """
Implement next permutation, which rearranges numbers into the lexicographically
next greater permutation of numbers.
If such arrangement is not possible, it must rearrange it as the lowest
possible order (ie, sorted in ascending order).
The replacement must be in-place, do not allocate extra memory.
Here are some examples. Inputs are in the left-hand column and its
corresponding outputs are in the right-hand column.
1,2,3 → 1,3,2
3,2,1 → 1,2,3
1,1,5 → 1,5,1
"""
class Solution(object):
    def nextPermutation(self, nums):
        """
        Rearrange `nums` in place into the lexicographically next greater
        permutation; if none exists (fully non-increasing), sort ascending.

        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        if not nums:
            # Guard: the original raised NameError on an empty list because
            # the loop below never bound `i`.
            return
        l = len(nums)
        # Find the rightmost i with nums[i - 1] < nums[i]. If the sequence is
        # entirely non-increasing we end up with i == 0 ("no pivot").
        for i in reversed(range(l)):
            if nums[i - 1] < nums[i]:
                break
        if i > 0:
            # Swap the pivot nums[i - 1] with the rightmost element greater
            # than it; everything right of the pivot is non-increasing.
            for j in reversed(range(l)):
                if nums[j] > nums[i - 1]:
                    nums[i - 1], nums[j] = nums[j], nums[i - 1]
                    break
        # Bug fix: the original `nums[i:] = nums[i::-1]` assigned the
        # *reversed prefix* (indices i..0) into the suffix slot, corrupting
        # the list (e.g. [1,2,3] became [1,3,2,3,1]); the suffix itself must
        # be reversed to become ascending.
        nums[i:] = nums[i:][::-1]
| mit | Python | |
548cd2efbb73e39a3e067cb66d5fc466d797cfb9 | add python dir for random test code that I write | fretboardfreak/code,fretboardfreak/code,fretboardfreak/code,fretboardfreak/code,fretboardfreak/code,fretboardfreak/code,fretboardfreak/code,fretboardfreak/code,fretboardfreak/code | python/context_manager_test.py | python/context_manager_test.py | #!/usr/bin/env python3
"""
This script was used to verify the behaviour of nested context managers in
python 3.
"""
import tempfile
from contextlib import contextmanager
@contextmanager
def first_manager():
print('Entering First Manager')
yield
print('Exiting First Manager')
@contextmanager
def second_manager(argument=None):
print("Entering Second Manager: {}".format(argument))
yield
print("Exiting Second Manager")
if __name__ == "__main__":
print('Running nested context manager test...')
with first_manager(), \
tempfile.TemporaryDirectory() as temp_dir, \
second_manager(temp_dir):
print('Inside context')
print('temp dir = {}'.format(temp_dir))
print('Done')
| apache-2.0 | Python | |
a394375911b11d06ad5466f45a5fcda4b970febf | Create get_gene_co-ordinates.py | ElizabethSutton/RNA-seq_analysis_tools | Get_gene_co-ordinates/get_gene_co-ordinates.py | Get_gene_co-ordinates/get_gene_co-ordinates.py | #!/usr/bin/env python
# dealing with command line input
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-genes", type=str)
parser.add_argument("-gtf", type=str)
args = parser.parse_args()
genes_filename = args.genes
gtf_filename = args.gtf
# making list of genes from genes file
# NOTE(review): `file` shadows the Python builtin of the same name.
file = open(genes_filename)
genes = []
for line in file:
    gene = line.rstrip()
    genes.append(gene)
file.close()
# extracting co-ordinates from gtf file
# NOTE(review): the GTF is re-read once per gene (O(genes x lines));
# a single pass accumulating per-gene min/max would be much faster.
delimiter = '\t'
start_co_ordinates = []
stop_co_ordinates = []
scaffolds = []
for gene in genes:
    file = open(gtf_filename)
    co_ordinates = []
    for line in file:
        split = line.split(delimiter)
        # Column 9 holds the attribute list; the gene id is the first
        # double-quoted token of its first ';'-separated entry.
        info = split[8]
        info = info.split(';')
        gene_id = info[0]
        gene_id = gene_id.split('"')
        gene_id = gene_id[1]
        if gene_id == gene:
            start_co_ordinate = int(split[3])
            stop_co_ordinate = int(split[4])
            co_ordinates.append(start_co_ordinate)
            co_ordinates.append(stop_co_ordinate)
            scaffold = split[0]
    # BED is 0-based half-open, hence the -1 on the start co-ordinate.
    # NOTE(review): if a gene is absent from the GTF, min() raises on an
    # empty list and `scaffold` is stale or undefined — consider guarding.
    start_co_ordinates.append(min(co_ordinates)-1)
    stop_co_ordinates.append(max(co_ordinates))
    scaffolds.append(scaffold)
    file.close()
# writing output
output = open('gene_co-ordinates.bed', 'w')
i = 0
for gene in genes:
    output.write(scaffolds[i] + '\t' + str(start_co_ordinates[i]) + '\t' + str(stop_co_ordinates[i]) + '\t' + gene + '\n')
    i = i + 1
output.close()
| mit | Python | |
5fc129f83dc68bb005dea34a61e2bbe1751e2ca3 | add custom module: ec2_placement_group | Kitware/gobig,Kitware/gobig,opadron/gobig,opadron/gobig | library/ec2_placement_group.py | library/ec2_placement_group.py | #!/usr/bin/python
import sys
__arg_spec = None
def get_arg_spec():
if __arg_spec is not None: return __arg_spec
strats = ["cluster"]
states = ["present", "absent"]
__ arg_spec = ec2_argument_spec()
arg_spec.update({
"name" : { "required": True, "type": "str" },
"strategy": { "default" : "cluster", "type": "str", "choices": strats },
"state" : { "default" : "present", "type": "str", "choices": states },
})
return __arg_spec
def main():
module = AnsibleModule(argument_spec=get_arg_spec())
try:
import boto.ec2
except ImportError:
module.fail_json(msg="module not found: boto.ec2")
name = module.params.get("name")
strategy = module.params.get("strategy")
state = module.params.get("state")
ec2 = ec2_connect(module)
group_exists = any(
group.name == name
for group in ec2.get_all_placement_groups(filters={"group-name": name})
)
msg = "nothing to do"
changed = False
if state == "present" and not group_exists:
ec2.create_placement_group(name)
msg = "placement group {} created".format(name)
changed = True
elif state == "absent" and group_exists:
ec2.delete_placement_group(name)
msg = "placement group {} removed".format(name)
changed = True
module.exit_json(msg=msg,
name=name,
strategy=strategy,
changed=changed)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| apache-2.0 | Python | |
a182633b03f654f9d8f933aab3c0aa99f8deadd2 | Add alg_bfs.py | bowen0701/algorithms_data_structures | alg_bfs.py | alg_bfs.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from ds_queue import Queue
def breadth_first_search():
pass
def main():
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python | |
2270317a8abbc288cdb757ba583fed5135317e67 | Add embedding example | mindriot101/IPython-Notebook-Tutorial,mindriot101/IPython-Notebook-Tutorial,mindriot101/IPython-Notebook-Tutorial | embedding-example/main.py | embedding-example/main.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import argparse
import logging
logging.basicConfig(
level='DEBUG', format='%(asctime)s|%(name)s|%(levelname)s|%(message)s')
logger = logging.getLogger(__name__)
def main(args):
logger.debug(args)
a = 10
import IPython; IPython.embed(); exit()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
main(parser.parse_args())
| unlicense | Python | |
714829cb8ecec9edec07b4fce9a3340ee228e77f | Add encoding benchmark | tomashaber/raiden,tomaaron/raiden,charles-cooper/raiden,tomaaron/raiden,hackaugusto/raiden,tomashaber/raiden,tomaaron/raiden,tomaaron/raiden,tomashaber/raiden,tomashaber/raiden,hackaugusto/raiden,charles-cooper/raiden,tomashaber/raiden | raiden/tests/encoding_speed.py | raiden/tests/encoding_speed.py | import pytest
from timeit import timeit
setup = """
import cPickle
import umsgpack
from raiden.messages import Ping, decode, MediatedTransfer, Lock, Ack
from raiden.utils import privtoaddr, sha3
privkey = 'x' * 32
address = privtoaddr(privkey)
m0 = Ping(nonce=0)
m0.sign(privkey)
m1 = MediatedTransfer(10, address, 100, address, address,
Lock(100, 50, sha3(address)), address, address)
m1.sign(privkey)
m2 = Ack(address, sha3(privkey))
"""
exec(setup)
codecs = {
'rlp': 'd = {}.encode(); decode(d)',
'cPickle': 'd = cPickle.dumps({}, 2); cPickle.loads(d)',
'msgpack': 'd = umsgpack.packb({}.serialize()); umsgpack.unpackb(d)'
}
for m in ('m0', 'm1', 'm2'):
msg_name = locals()[m]
print("\n{}".format(msg_name))
for codec_name, code_base in codecs.items():
code = code_base.format(m)
exec(code)
print('{} encoded {} size: {}'.format(codec_name, msg_name, len(d)))
result = timeit(code, setup, number=10000)
print '{} {} (en)(de)coding speed: {}'.format(codec_name, msg_name, result)
| mit | Python | |
385c46ec48593cd7d3233e0ab7cfca98c321bcf3 | Create 3dproject.py | danielwilson2017/3DProjectReal | 3dproject.py | 3dproject.py | mit | Python | ||
a97f7931b597a8c4273fb7e47081f5c1224e1441 | add new module | cellnopt/cellnopt,cellnopt/cellnopt | cno/core/standalone.py | cno/core/standalone.py | # -*- python -*-
#
# This file is part of CNO software
#
# Copyright (c) 2014 - EBI-EMBL
#
# File author(s): Thomas Cokelaer <cokelaer@ebi.ac.uk>
#
# Distributed under the GPLv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# website: http://github.com/cellnopt/cellnopt
#
##############################################################################
import sys
class Standalone(object):
"""Common class for all standalone applications"""
def __init__(self, args, user_options):
# stores the arguments
self.args = args
self.user_options = user_options
if len(args) == 1:
# shows the help message if no arguments provided
self.help()
else:
# The user values should be used to update the
# user_options
options = self.user_options.parse_args(args[1:])
# Should update the CNOConfig file with the provided options
for key in self.user_options.config.keys():
for option in self.user_options.config[key]._get_names():
value = getattr(options, option)
setattr(getattr( getattr(self.user_options.config, key), option ), 'value', value)
self.options = options
def help(self):
self.user_options.parse_args(["prog", "--help"])
def report(self):
"""Create report and shows report (or not)"""
if self.options.onweb is True:
self.trainer.report(show=True)
elif self.options.report is True:
self.trainer.report(show=False)
else:
from easydev.console import red
print(red("No report requested; nothing will be saved or shown"))
print("use --on-web or --report options")
| bsd-2-clause | Python | |
8c6ce0a8b30bd000687eac9593ecbdc07130bd0e | Add lc739_daily_temperatures.py | bowen0701/algorithms_data_structures | lc739_daily_temperatures.py | lc739_daily_temperatures.py | """Leetcode 739. Daily Temperatures
Medium
Given a list of daily temperatures T, return a list such that, for each day
in the input, tells you how many days you would have to wait until a warmer
temperature. If there is no future day for which this is possible, put 0
instead.
For example, given the list of temperatures T = [73, 74, 75, 71, 69, 72, 76, 73],
your output should be [1, 1, 4, 2, 1, 1, 0, 0].
Note: The length of temperatures will be in the range [1, 30000]. Each
temperature will be an integer in the range [30, 100].
"""
class Solution(object):
def dailyTemperatures(self, T):
"""
:type T: List[int]
:rtype: List[int]
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python | |
7ab0cc93703abf6716b353f38a009897ab154ce4 | Add the plugin framework from common; use and test. | n0ano/ganttclient | nova/tests/test_plugin_api_extensions.py | nova/tests/test_plugin_api_extensions.py | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pkg_resources
import nova
from nova.api.openstack.compute import extensions as computeextensions
from nova.api.openstack import extensions
from nova.openstack.common.plugin import plugin
from nova.openstack.common.plugin import pluginmanager
from nova import test
class StubController(object):
def i_am_the_stub(self):
pass
class StubControllerExtension(extensions.ExtensionDescriptor):
"""This is a docstring. We need it."""
name = 'stubextension'
alias = 'stubby'
def get_resources(self):
resources = []
res = extensions.ResourceExtension('testme',
StubController())
resources.append(res)
return resources
service_list = []
class TestPluginClass(plugin.Plugin):
def __init__(self, service_name):
super(TestPluginClass, self).__init__(service_name)
self._add_api_extension_descriptor(StubControllerExtension)
service_list.append(service_name)
class MockEntrypoint(pkg_resources.EntryPoint):
def load(self):
return TestPluginClass
class APITestCase(test.TestCase):
"""Test case for the plugin api extension interface"""
def test_add_extension(self):
def mock_load(_s):
return TestPluginClass()
def mock_iter_entry_points(_t):
return [MockEntrypoint("fake", "fake", ["fake"])]
self.stubs.Set(pkg_resources, 'iter_entry_points',
mock_iter_entry_points)
global service_list
service_list = []
# Marking out the default extension paths makes this test MUCH faster.
self.flags(osapi_compute_extension=[])
self.flags(osapi_volume_extension=[])
found = False
mgr = computeextensions.ExtensionManager()
for res in mgr.get_resources():
# We have to use this weird 'dir' check because
# the plugin framework muddies up the classname
# such that 'isinstance' doesn't work right.
if 'i_am_the_stub' in dir(res.controller):
found = True
self.assertTrue(found)
self.assertEqual(len(service_list), 1)
self.assertEqual(service_list[0], 'compute-extensions')
| apache-2.0 | Python | |
19218b0b1f2198c1b0c01594658a3a4cd4dd0444 | Remove no more needed try import | vheon/JediHTTP,vheon/JediHTTP,micbou/JediHTTP,micbou/JediHTTP | jedihttp/hmac_plugin.py | jedihttp/hmac_plugin.py | # Copyright 2015 Cedraro Andrea <a.cedraro@gmail.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from bottle import request, response, abort
import hmaclib
try:
from urllib.parse import urlparse
from http import client as httplib
except ImportError:
from urlparse import urlparse
import httplib
class HmacPlugin( object ):
"""
Bottle plugin for hmac request authentication
http://bottlepy.org/docs/dev/plugindev.html
"""
name = 'hmac'
api = 2
def __init__( self, hmac_secret ):
self._hmachelper = hmaclib.JediHTTPHmacHelper( hmac_secret )
self._logger = logging.getLogger( __name__ )
def __call__( self, callback ):
def wrapper( *args, **kwargs ):
if not IsLocalRequest():
self._logger.info( 'Dropping request with bad Host header.' )
abort( httplib.UNAUTHORIZED,
'Unauthorized, received request from non-local Host.' )
return
if not self.IsRequestAuthenticated():
self._logger.info( 'Dropping request with bad HMAC.' )
abort( httplib.UNAUTHORIZED, 'Unauthorized, received bad HMAC.' )
return
body = callback( *args, **kwargs )
self.SignResponseHeaders( response.headers, body )
return body
return wrapper
def IsRequestAuthenticated( self ):
return self._hmachelper.IsRequestAuthenticated( request.headers,
request.method,
request.path,
request.body.read() )
def SignResponseHeaders( self, headers, body ):
self._hmachelper.SignResponseHeaders( headers, body )
def IsLocalRequest():
host = urlparse( 'http://' + request.headers[ 'host' ] ).hostname
return host == '127.0.0.1' or host == 'localhost'
| # Copyright 2015 Cedraro Andrea <a.cedraro@gmail.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from bottle import request, response, abort
# XXX(vheon): If I don't do this then some tests are going to fail because
# handlers.py could not find utils.py; I don't know why though.
try:
import hmaclib
except:
from . import hmaclib
try:
from urllib.parse import urlparse
from http import client as httplib
except ImportError:
from urlparse import urlparse
import httplib
class HmacPlugin( object ):
"""
Bottle plugin for hmac request authentication
http://bottlepy.org/docs/dev/plugindev.html
"""
name = 'hmac'
api = 2
def __init__( self, hmac_secret ):
self._hmachelper = hmaclib.JediHTTPHmacHelper( hmac_secret )
self._logger = logging.getLogger( __name__ )
def __call__( self, callback ):
def wrapper( *args, **kwargs ):
if not IsLocalRequest():
self._logger.info( 'Dropping request with bad Host header.' )
abort( httplib.UNAUTHORIZED,
'Unauthorized, received request from non-local Host.' )
return
if not self.IsRequestAuthenticated():
self._logger.info( 'Dropping request with bad HMAC.' )
abort( httplib.UNAUTHORIZED, 'Unauthorized, received bad HMAC.' )
return
body = callback( *args, **kwargs )
self.SignResponseHeaders( response.headers, body )
return body
return wrapper
def IsRequestAuthenticated( self ):
return self._hmachelper.IsRequestAuthenticated( request.headers,
request.method,
request.path,
request.body.read() )
def SignResponseHeaders( self, headers, body ):
self._hmachelper.SignResponseHeaders( headers, body )
def IsLocalRequest():
host = urlparse( 'http://' + request.headers[ 'host' ] ).hostname
return host == '127.0.0.1' or host == 'localhost'
| apache-2.0 | Python |
3cdc40906fb055679b0989ee98cb808a741caa12 | Solve challenge 10 | HKuz/PythonChallenge | Challenges/chall_10.py | Challenges/chall_10.py | #!/Applications/anaconda/envs/Python3/bin
# Python challenge - 10
# http://www.pythonchallenge.com/pc/return/bull.html
# http://www.pythonchallenge.com/pc/return/sequence.txt
def main():
'''
Hint: len(a[30]) = ?
a = [1, 11, 21, 1211, 111221,
<area shape="poly" coords="146, ..., 399" href="sequence.txt">
'''
a_orig = ['1', '11', '21', '1211', '111221']
a = ['1']
for i in range(30):
curr_string = a[i]
new_string = ''
prev_char = curr_string[0]
pos = 1
count = 1
while pos < len(curr_string):
# Check if character same as prior
# Y: increment count and position
# N: add to newS and reset
curr_char = curr_string[pos]
if curr_char == prev_char:
# Same num, increment count and position
count += 1
pos += 1
else:
# Different num; tack count and num to new_string, reset vars
new_string += str(count) + prev_char
prev_char = curr_string[pos]
count = 1
pos += 1
# Pick up the last number and its count
new_string += str(count) + prev_char
a.append(new_string)
# print(a)
# print(a[30])
print('Length of a[30]: {}'.format(len(a[30])))
return 0
# Keyword: 5808
if __name__ == '__main__':
main()
| mit | Python | |
7d92ef550b1b1d649fe460959d823248b72f336e | Add staging settings module | Mystopia/fantastic-doodle | service/settings/staging.py | service/settings/staging.py | from service.settings.production import *
ALLOWED_HOSTS = [
'fantastic-doodle--staging.herokuapp.com',
]
| unlicense | Python | |
27db11c0e2887f3d8b5ae5c95fa602f778f872ba | fix widgets safe_import on python 3.2 | kopchik/qtile,rxcomm/qtile,StephenBarnes/qtile,EndPointCorp/qtile,frostidaho/qtile,dequis/qtile,kopchik/qtile,kynikos/qtile,cortesi/qtile,aniruddhkanojia/qtile,soulchainer/qtile,encukou/qtile,aniruddhkanojia/qtile,apinsard/qtile,frostidaho/qtile,de-vri-es/qtile,xplv/qtile,qtile/qtile,zordsdavini/qtile,kiniou/qtile,zordsdavini/qtile,ramnes/qtile,apinsard/qtile,farebord/qtile,andrewyoung1991/qtile,himaaaatti/qtile,de-vri-es/qtile,himaaaatti/qtile,nxnfufunezn/qtile,StephenBarnes/qtile,jdowner/qtile,cortesi/qtile,w1ndy/qtile,encukou/qtile,farebord/qtile,flacjacket/qtile,ramnes/qtile,flacjacket/qtile,EndPointCorp/qtile,qtile/qtile,tych0/qtile,kiniou/qtile,jdowner/qtile,andrewyoung1991/qtile,kseistrup/qtile,kynikos/qtile,dequis/qtile,xplv/qtile,rxcomm/qtile,tych0/qtile,nxnfufunezn/qtile,kseistrup/qtile,soulchainer/qtile,w1ndy/qtile | libqtile/widget/__init__.py | libqtile/widget/__init__.py | import logging
import traceback
import importlib
logger = logging.getLogger('qtile')
def safe_import(module_name, class_name):
"""
try to import a module, and if it fails because an ImporError
it logs on WARNING, and logs the traceback on DEBUG level
"""
if type(class_name) is list:
for name in class_name:
safe_import(module_name, name)
return
package = __package__
# python 3.2 don't set __package__
if not package:
package = __name__
try:
module = importlib.import_module(module_name, package)
globals()[class_name] = getattr(module, class_name)
except ImportError as error:
msg = "Can't Import Widget: '%s.%s', %s"
logger.warn(msg % (module_name, class_name, error))
logger.debug(traceback.format_exc())
safe_import(".backlight", "Backlight")
safe_import(".battery", ["Battery", "BatteryIcon"])
safe_import(".clock", "Clock")
safe_import(".currentlayout", "CurrentLayout")
safe_import(".debuginfo", "DebugInfo")
safe_import(".graph", ["CPUGraph", "MemoryGraph", "SwapGraph", "NetGraph",
"HDDGraph", "HDDBusyGraph"])
safe_import(".groupbox", ["AGroupBox", "GroupBox"])
safe_import(".maildir", "Maildir")
safe_import(".notify", "Notify")
safe_import(".prompt", "Prompt")
safe_import(".sensors", "ThermalSensor")
safe_import(".sep", "Sep")
safe_import(".she", "She")
safe_import(".spacer", "Spacer")
safe_import(".systray", "Systray")
safe_import(".textbox", "TextBox")
safe_import(".volume", "Volume")
safe_import(".windowname", "WindowName")
safe_import(".windowtabs", "WindowTabs")
safe_import(".keyboardlayout", "KeyboardLayout")
safe_import(".df", "DF")
safe_import(".image", "Image")
safe_import(".gmail_checker", "GmailChecker")
safe_import(".clipboard", "Clipboard")
safe_import(".countdown", "Countdown")
safe_import(".tasklist", "TaskList")
safe_import(".pacman", "Pacman")
safe_import(".launchbar", "LaunchBar")
safe_import(".canto", "Canto")
safe_import(".mpriswidget", "Mpris")
safe_import(".mpris2widget", "Mpris2")
safe_import(".mpdwidget", "Mpd")
safe_import(".yahoo_weather", "YahooWeather")
safe_import(".bitcoin_ticker", "BitcoinTicker")
safe_import(".wlan", "Wlan")
safe_import(".google_calendar", "GoogleCalendar")
| import logging
import traceback
import importlib
logger = logging.getLogger('qtile')
def safe_import(module_name, class_name):
"""
try to import a module, and if it fails because an ImporError
it logs on WARNING, and logs the traceback on DEBUG level
"""
if type(class_name) is list:
for name in class_name:
safe_import(module_name, name)
return
try:
module = importlib.import_module(module_name, __package__)
globals()[class_name] = getattr(module, class_name)
except ImportError as error:
msg = "Can't Import Widget: '%s.%s', %s"
logger.warn(msg % (module_name, class_name, error))
logger.debug(traceback.format_exc())
safe_import(".backlight", "Backlight")
safe_import(".battery", ["Battery", "BatteryIcon"])
safe_import(".clock", "Clock")
safe_import(".currentlayout", "CurrentLayout")
safe_import(".debuginfo", "DebugInfo")
safe_import(".graph", ["CPUGraph", "MemoryGraph", "SwapGraph", "NetGraph",
"HDDGraph", "HDDBusyGraph"])
safe_import(".groupbox", ["AGroupBox", "GroupBox"])
safe_import(".maildir", "Maildir")
safe_import(".notify", "Notify")
safe_import(".prompt", "Prompt")
safe_import(".sensors", "ThermalSensor")
safe_import(".sep", "Sep")
safe_import(".she", "She")
safe_import(".spacer", "Spacer")
safe_import(".systray", "Systray")
safe_import(".textbox", "TextBox")
safe_import(".volume", "Volume")
safe_import(".windowname", "WindowName")
safe_import(".windowtabs", "WindowTabs")
safe_import(".keyboardlayout", "KeyboardLayout")
safe_import(".df", "DF")
safe_import(".image", "Image")
safe_import(".gmail_checker", "GmailChecker")
safe_import(".clipboard", "Clipboard")
safe_import(".countdown", "Countdown")
safe_import(".tasklist", "TaskList")
safe_import(".pacman", "Pacman")
safe_import(".launchbar", "LaunchBar")
safe_import(".canto", "Canto")
safe_import(".mpriswidget", "Mpris")
safe_import(".mpris2widget", "Mpris2")
safe_import(".mpdwidget", "Mpd")
safe_import(".yahoo_weather", "YahooWeather")
safe_import(".bitcoin_ticker", "BitcoinTicker")
safe_import(".wlan", "Wlan")
safe_import(".google_calendar", "GoogleCalendar")
| mit | Python |
4ef62836086ea0c6adb0430af78c75614afaacfd | Create reminders.py | JLJTECH/TutorialTesting | Misc/reminders.py | Misc/reminders.py | #Python reminders - those pesky Gotchas!
''.join(list) # Collapse the list
[a,b,c,d,e].count(item) #Count occurrences of item in list
a[0] #put anything in square brackets to search list index
#Strange list rotation
def rotate_left3(nums):
a = nums[0]
nums[0] = nums[1]
nums[1] = nums[2]
nums[2] = a
return nums -> [1,2,3] = [2,3,1]
#Evaluator positioning
if someting:
return (item 1 >= 10)
else:
return (item > 1 and item <= 5)
| mit | Python | |
82b06e07348a21d509e2d1865913e2faed864b11 | Add time series analysis | j0h4x0r/ReliabilityLens,j0h4x0r/ReliabilityLens,j0h4x0r/ReliabilityLens | StatusAnalysis.py | StatusAnalysis.py | import math
import collections
def analyze_series(series):
'''
This is a time series analysis. The input is a list of number,
and it returns a score showing how normal the potential pattern is.
We assume the data are normally distributed, and likelihood is
then used as the score.
'''
# This may only works well with large data
if not len(series):
return 0
# compute likelihood
mean = float(sum(series)) / len(series)
variance = sum(map(lambda x: (x - mean) ** 2, series))
distribution = lambda x: 1 / math.sqrt(2 * math.pi * variance) * (math.e ** ((-(x - mean) ** 2) / (2 * variance)))
likelihood = reduce(lambda x, y: x * y, map(distribution, series))
return likelihood
def generate_count_series(tweets):
'''
This function takes a list of tweets (with timestamps)
and return a list of the numbers of tweets a user created every day.
'''
dates = map(lambda t: t['created_at'].date(), tweets)
count_dict = collections.Counter(dates)
min_date = min(dates)
days = (max(dates) - min_date).days + 1
count_series = [count_dict[min_date + datetime.timedelta(i)] if min_date + datetime.timedelta(i) in count_dict else 0 for i in range(days)]
return count_series | mit | Python | |
b23b86265dd952a3bfa1684549ab2087682c9733 | add dispatch namespace | quantopian/datashape,cowlicks/datashape,cowlicks/datashape,llllllllll/datashape,ContinuumIO/datashape,aterrel/datashape,llllllllll/datashape,aterrel/datashape,blaze/datashape,quantopian/datashape,cpcloud/datashape,cpcloud/datashape,ContinuumIO/datashape,blaze/datashape | datashape/dispatch.py | datashape/dispatch.py | from multipledispatch import dispatch
from functools import partial
namespace = dict()
dispatch = partial(dispatch, namespace=namespace)
| bsd-2-clause | Python | |
285236e1045915706b0cf2c6137273be7f9eb5d6 | Add generic abstract Module class | ECAM-Brussels/ECAMTV,ECAM-Brussels/ECAMTV,ECAM-Brussels/ECAMTV | modules/module.py | modules/module.py | # module.py
# Author: Sébastien Combéfis
# Version: May 25, 2016
from abc import *
class Module(metaclass=ABCMeta):
'''Abstract class representing a generic module.'''
def __init__(self, name):
self.__name = name
@property
def name(self):
return self.__name
@abstractmethod
def widget(self):
'''Returns a function that renders the widget view of the module
Pre: -
Post: The returned value contains the HTML rendering of the widget view
of this module or None if not supported by this module
'''
...
@abstractmethod
def page(self):
'''Returns a function that renders the page view of the module
Pre: -
Post: The returned value contains the HTML rendering of the page view
of this module or None if not supported by this module
'''
... | agpl-3.0 | Python | |
fccbc71622299e9987d470bb73ab071f705d98ab | Add module to support Mac OSX's System Profiler utility. | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/modules/systemprofiler.py | salt/modules/systemprofiler.py | # -*- coding: utf-8 -*-
'''
System Profiler Module
Interface with Mac OSX's command-line System Profiler utility to get
information about package receipts and installed applications.
'''
import plistlib
import subprocess
import pprint
import salt.utils.which
PROFILER_BINARY = '/usr/sbin/system_profiler'
def __virtual__():
'''
Check to see if the system_profiler binary is available
'''
PROFILER_BINARY = salt.utils.which('system_profiler')
if PROFILER_BINARY:
return True
else:
return False
def _call_system_profiler(datatype):
'''
Call out to system_profiler. Return a dictionary
of the stuff we are interested in.
'''
plist = plistlib.readPlistFromString(subprocess.check_output(
[PROFILER_BINARY, '-detailLevel', 'full',
'-xml', datatype]))
try:
apps = plist[0]['_items']
except (IndexError, KeyError):
apps = []
return apps
def receipts():
'''
Return the results of a call to
`system_profiler -xml -detail full
SPInstallHistoryDataType`
as a dictionary. Top-level keys of the dictionary
are the names of each set of install receipts, since
there can be multiple receipts with the same name.
Contents of each key are a list of dicttionaries.
CLI Example:
.. code-block:: bash
salt '*' systemprofiler.receipts
'''
apps = _call_system_profiler('SPInstallHistoryDataType')
appdict = {}
for a in apps:
details = dict(a)
details.pop('_name')
if 'install_date' in details:
details['install_date'] = details['install_date'].strftime('%Y-%m-%d %H:%M:%S')
if 'info' in details:
try:
details['info'] = '{0}: {1}'.format(details['info'][0],
details['info'][1].strftime('%Y-%m-%d %H:%M:%S'))
except (IndexError, AttributeError):
pass
if a['_name'] not in appdict:
appdict[a['_name']] = []
appdict[a['_name']].append(details)
return appdict
def applications():
'''
Return the results of a call to
`system_profiler -xml -detail full
SPApplicationsDataType`
as a dictionary. Top-level keys of the dictionary
are the names of each set of install receipts, since
there can be multiple receipts with the same name.
Contents of each key are a list of dicttionaries.
Note that this can take a long time depending on how many
applications are installed on the target Mac.
CLI Example:
.. code-block:: bash
salt '*' systemprofiler.applications
'''
apps = _call_system_profiler('SPApplicationsDataType')
appdict = {}
for a in apps:
details = dict(a)
details.pop('_name')
if 'lastModified' in details:
details['lastModified'] = details['lastModified'].strftime('%Y-%m-%d %H:%M:%S')
if 'info' in details:
try:
details['info'] = '{0}: {1}'.format(details['info'][0],
details['info'][1].strftime('%Y-%m-%d %H:%M:%S'))
except (IndexError, AttributeError):
pass
if a['_name'] not in appdict:
appdict[a['_name']] = []
appdict[a['_name']].append(details)
return appdict
| apache-2.0 | Python | |
66c5f0d6d44c6bf9e72c52864268cf64dddf4b42 | Add metadata reader script | hadim/fiji_scripts,hadim/fiji_tools,hadim/fiji_scripts,hadim/fiji_scripts,hadim/fiji_tools | src/main/resources/script_templates/Hadim_Scripts/Metadata/Read_Metadata.py | src/main/resources/script_templates/Hadim_Scripts/Metadata/Read_Metadata.py | # @ImageJ ij
from io.scif import FieldPrinter
filePath = "/home/hadim/.data/Test/IM000556.Tif"
format = ij.scifio().format().getFormat(filePath)
metadata = format.createParser().parse(filePath)
#print(FieldPrinter(metadata))
imageMeta = metadata.get(0)
print(imageMeta) | bsd-3-clause | Python | |
8587cfb3f36af6f39f54b072a0518e9b81cac850 | add record_wav | Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python | pyaudio/record_wav.py | pyaudio/record_wav.py | # !/usr/bin/env python
"""PyAudio example: Record a few seconds of audio and save to a WAVE file."""
import pyaudio
import wave
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "output.wav"
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
print("* recording")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
| mit | Python | |
245cdcd483f40bbe91e9c9695a73e7ef875d4eb4 | Add harvester for CiteSeerX | CenterForOpenScience/scrapi,erinspace/scrapi,felliott/scrapi,felliott/scrapi,fabianvf/scrapi,erinspace/scrapi,fabianvf/scrapi,CenterForOpenScience/scrapi | scrapi/harvesters/citeseerx.py | scrapi/harvesters/citeseerx.py | '''
Harvester for the "CiteSeerX Scientific Literature Digital Library and Search Engine" for the SHARE project
Example API call: http://citeseerx.ist.psu.edu/oai2?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class CiteseerxHarvester(OAIHarvester):
short_name = 'citeseerx'
long_name = '"CiteSeerX Scientific Literature Digital Library and Search Engine"'
url = 'http://citeseerx.ist.psu.edu'
base_url = 'http://citeseerx.ist.psu.edu/oai2'
property_list = ['rights', 'format', 'source', 'date', 'identifier', 'type', 'setSpec']
timezone_granularity = False
| apache-2.0 | Python | |
3b25faf6a2e95e3278cde45c04c4adcb50b3659a | add script find_duplicates | ZTH1970/alcide,ZTH1970/alcide,ZTH1970/alcide,ZTH1970/alcide,ZTH1970/alcide | scripts/find_duplicate_acts.py | scripts/find_duplicate_acts.py | #!/usr/bin/env python
import os
import datetime as dt
from collections import defaultdict
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "calebasse.settings")
from calebasse.actes.validation import get_days_with_acts_not_locked
from calebasse.actes.models import Act
doubles = defaultdict(lambda: set())
days_with_acts_not_locked = get_days_with_acts_not_locked(dt.date(2010,1,1), dt.date.today())
acts = Act.objects.filter(date__in=days_with_acts_not_locked) \
.order_by('time') \
.prefetch_related('doctors')
for act in acts:
participants_id = [doctor.id for doctor in act.doctors.all()]
key = (act.date, act.patient_id, act.act_type_id, tuple(sorted(participants_id)))
doubles[key].add(act)
for key in doubles.keys():
if len(doubles[key]) < 2:
del doubles[key]
date = None
total = 0
for key in sorted(doubles.iterkeys()):
for act in doubles[key]:
if not act.validation_locked:
break
else:
continue
if key[0] != date:
if date is not None:
print
date = key[0]
print ' = Acte en double le ', date, '='
print '{:>6} {:>6} {:>6} {:>6} {:>6}'.format('act_id', 'ev_id', 'exc_id', 'old_id', 'heure')
for act in sorted(doubles[key]):
total += 1
exception_to = ''
if act.parent_event:
exception_to = act.parent_event.exception_to_id
print '%06d' % act.id, '%6s' % act.parent_event_id, '%6s' % exception_to, '%6s' % act.old_id, '%6s' % act.time.strftime('%H:%M'), act, act.validation_locked, act.actvalidationstate_set.all()
print
print 'Total', total, 'actes'
| agpl-3.0 | Python | |
e110c968ece35e41c467aeb5fceb9274023e7e82 | Create child class for Rainbow bulb | litobro/PyPlaybulb | pyplaybulb/rainbow.py | pyplaybulb/rainbow.py | from pyplaybulb.playbulb import Playbulb
EFFECT_FLASH = '00'
EFFECT_PULSE = '01'
EFFECT_RAINBOW = '02'
EFFECT_RAINBOW_FADE = '03'
class Rainbow(Playbulb):
    """Playbulb "Rainbow" bulb: colour control plus built-in light effects."""

    # Characteristic handles used by this model.
    # NOTE(review): hexa_get_colour equals hexa_effect ('0x0019') while
    # set_colour writes '0x001b' — verify against the device protocol.
    hexa_set_colour = '0x001b'
    hexa_effect = '0x0019'
    hexa_get_colour = '0x0019'

    def set_colour(self, colour):
        """Write *colour* to the bulb's colour characteristic."""
        self.connection.char_write(self.hexa_set_colour, colour)

    def get_colour(self):
        """Read and return the value of the colour characteristic."""
        return self.connection.char_read(self.hexa_get_colour)

    def set_effect(self, effect_type, color, speed):
        """Start a built-in effect with the given colour code and speed."""
        payload = color + effect_type + '00' + speed + '00'
        self.connection.char_write(self.hexa_effect, payload)
c2a2bb683df9f86fefadace4a0375e696b8c06d9 | add up2difiles | sevaivanov/various,sevaivanov/various,sevaivanov/various,sevaivanov/various,sevaivanov/various,sevaivanov/various | python/up2diffiles.py | python/up2diffiles.py | #!/usr/bin/python
INPUT_FILE = 'ftp-betterdefaultpasslist.txt'


def main():
    """Read ``user:password`` lines from INPUT_FILE, echo each pair, and
    write all usernames to ``users.txt`` and all passwords to ``pwds.txt``.
    """
    with open(INPUT_FILE, 'r') as f:
        lines = f.readlines()

    # Echo each pair (the password keeps its trailing newline, as before).
    for line in lines:
        user, pwd = line.split(':')
        print(user, pwd)

    # Bug fix: open the output files once. The original reopened them in 'w'
    # mode on every iteration, so only the *last* credential was kept.
    with open('users.txt', 'w') as users_out, open('pwds.txt', 'w') as pwds_out:
        for line in lines:
            user, pwd = line.split(':')
            users_out.write(user.strip() + '\n')
            pwds_out.write(pwd.strip() + '\n')


if __name__ == '__main__':
    main()
| mit | Python | |
e233352d5016c2b57ec4edbc4366ca4347bc1d98 | Create a single script to run the three demo services | uptane/uptane,awwad/uptane,awwad/uptane,uptane/uptane | demo/start_servers.py | demo/start_servers.py | """
start_servers.py
<Purpose>
A simple script to start the three cloud-side Uptane servers:
the Director (including its per-vehicle repositories)
the Image Repository
the Timeserver
To run the demo services in non-interactive mode, run:
python start_servers.py
To run the demo services in interactive mode, run:
python -i -c "from demo.start_servers import *; main()"
In either mode, the demo services will respond to commands sent via XMLRPC.
"""
import threading
import demo
import demo.demo_timeserver as dt
import demo.demo_director as dd
import demo.demo_image_repo as di
from six.moves import xmlrpc_server
def main():
  """Start the three cloud-side demo services in this process.

  Order matters: the Image Repository and Director are brought up first,
  then the Timeserver's XMLRPC listener is started (dt.listen() appears to
  block serving requests — see demo_timeserver).
  """
  # Start demo Image Repo, including http server and xmlrpc listener (for
  # webdemo)
  di.clean_slate()
  # Start demo Director, including http server and xmlrpc listener (for
  # manifests, registrations, and webdemo)
  dd.clean_slate()
  # Start demo Timeserver, including xmlrpc listener (for requests from demo
  # Primary)
  dt.listen()

if __name__ == '__main__':
  main()
| mit | Python | |
71af3a9b46094d94eb47e662cd30726140213de5 | Read graph. | PauliusLabanauskis/AlgorithmsDataStructures | algo_pathfinding/graph_input.py | algo_pathfinding/graph_input.py | def read_graph(path_file):
    """Read a graph description from *path_file* and return it.

    The file is expected to contain a single Python literal (e.g. a dict of
    adjacency lists); it is evaluated and returned unchanged.
    """
    graph = {}  # NOTE(review): unused leftover — the parsed data below is returned instead
    with open(path_file, 'r') as f:
        data = f.read()
    # WARNING: eval() executes arbitrary code from the file; prefer
    # ast.literal_eval() if the input is not fully trusted.
    graph_data = eval(data)
    return graph_data
ca9144c68d0c5fe08a109f26f595f3c7f0b6500d | Add errors.py and FontmakeError | googlefonts/fontmake,googlefonts/fontmake,googlei18n/fontmake,googlei18n/fontmake | Lib/fontmake/errors.py | Lib/fontmake/errors.py |
class FontmakeError(Exception):
    """Base class for all fontmake exceptions."""
| apache-2.0 | Python | |
294aee87691857636cb433a800a85f395e359fcb | Add gallery example for grdview (#502) | GenericMappingTools/gmt-python,GenericMappingTools/gmt-python | examples/gallery/grid/grdview_surface.py | examples/gallery/grid/grdview_surface.py | """
Plotting a surface
------------------
The :meth:`pygmt.Figure.grdview()` method can plot 3-D surfaces with ``surftype="s"``. Here,
we supply the data as an :class:`xarray.DataArray` with the coordinate vectors ``x`` and
``y`` defined. Note that the ``perspective`` argument here controls the azimuth and
elevation angle of the view. We provide a list of two arguments to ``frame`` — the
second argument, prepended with ``"z"``, specifies the :math:`z`-axis frame attributes.
Specifying the same scale for the ``projection`` and ``zscale`` arguments ensures equal
axis scaling.
"""
import pygmt
import numpy as np
import xarray as xr
# Define an interesting function of two variables, see:
# https://en.wikipedia.org/wiki/Ackley_function
def ackley(x, y):
    """Evaluate the Ackley function at (x, y).

    Its global minimum is 0 at the origin; NumPy ufuncs make it work
    elementwise on arrays (used below with meshgrid output).
    """
    envelope = -20 * np.exp(-0.2 * np.sqrt(0.5 * (x ** 2 + y ** 2)))
    oscillation = -np.exp(0.5 * (np.cos(2 * np.pi * x) + np.cos(2 * np.pi * y)))
    return envelope + oscillation + np.exp(1) + 20
# Create gridded data: evaluate the function on the full x/y grid and wrap
# the result with its coordinate vectors so grdview knows the extent.
INC = 0.05
x = np.arange(-5, 5 + INC, INC)
y = np.arange(-5, 5 + INC, INC)
data = xr.DataArray(ackley(*np.meshgrid(x, y)), coords=(x, y))

fig = pygmt.Figure()

# Plot grid as a 3-D surface (surftype="s"); using the same SCALE for the
# horizontal projection and the vertical zscale keeps the axes equal.
SCALE = 0.2  # [inches]
fig.grdview(
    data,
    frame=["a5f1", "za5f1"],
    projection=f"x{SCALE}i",
    zscale=f"{SCALE}i",
    surftype="s",
    cmap="roma",
    perspective="135/30",
)
fig.show()
| bsd-3-clause | Python | |
1058ed0847d151246299f73b325004fc04946fa0 | Set 1 - Challenge 2 | Scythe14/Crypto | Basics/challenge_2.py | Basics/challenge_2.py | #!/usr/bin/env python
if __name__ == '__main__':
    # Cryptopals Set 1 / Challenge 2: fixed XOR of two equal-length buffers,
    # treated here as big integers.
    first_buf = 0x1c0111001f010100061a024b53535009181c
    second_buf = 0x686974207468652062756c6c277320657965
    xored = first_buf ^ second_buf
    print(hex(xored))
| apache-2.0 | Python | |
2c30746908108aaeea40f5bf8511e0a1f343e1d9 | Create mapoon.py | Lcaracol/ideasbox.lan,ideascube/ideascube,Lcaracol/ideasbox.lan,Lcaracol/ideasbox.lan,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube | ideasbox/conf/mapoon.py | ideasbox/conf/mapoon.py | # -*- coding: utf-8 -*-
"""Mapmoon box in Australia"""
from .base import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASBOX_NAME = u"Mapoon"
COUNTRIES_FIRST = ['AU']
TIME_ZONE = 'Australia/Darwin'
LANGUAGE_CODE = 'en'
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'refugee_id', 'birth_year',
'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['refugee_id', 'short_name', 'full_name', 'latin_name', 'birth_year', 'gender']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the camp'), ['camp_entry_date', 'camp_activities', 'current_occupation', 'camp_address']), # noqa
(_('Origin'), ['country', 'city', 'country_of_origin_occupation', 'school_level', 'is_sent_to_school']), # noqa
(_('Language skills'), ['ar_level', 'en_level']),
(_('National residents'), ['id_card_number']),
)
ENTRY_ACTIVITY_CHOICES = [
('16 Days of Activism', _('16 Days of Activism')),
("AMANI Campaign", _("AMANI Campaign")),
("Anger Management Training", _("Anger Management Training")),
("Basic Computer Training", _("Basic Computer Training")),
("Beauty Training", _("Beauty Training")),
("Book Club", _("Book Club")),
("Conflict Resolution Training", _("Conflict Resolution Training")),
("Coping Skills and Mechanisms Training", _("Coping Skills and Mechanisms Training")), # noqa
("EDRAAK", _("EDRAAK")),
("Emotional intelligence Training", _("Emotional intelligence Training")),
("Handicrafts", _("Handicrafts")),
("How to be a Psychosocial Counselor Training", _("How to be a Psychosocial Counselor Training")), # noqa
("I am Woman", _("I am Woman")),
("International Children Day", _("International Children Day")),
("International Refugee Day", _("International Refugee Day")),
("Marathon", _("Marathon")),
("Mother's day celebration", _("Mother's day celebration")),
("Parenting Skills Training", _("Parenting Skills Training")),
("Peer Support Group", _("Peer Support Group")),
("Psychosocial ART Interventions Training", _("Psychosocial ART Interventions Training")), # noqa
("Puppets and Theatre", _("Puppets and Theatre")),
("Sewing and stitching", _("Sewing and stitching")),
("SIMSIM Club", _("SIMSIM Club")),
("Social Work Training", _("Social Work Training")),
("Stress Management Training", _("Stress Management Training")),
("Training of Trainers", _("Training of Trainers")),
("World Mental Health Day", _("World Mental Health Day")),
]
| agpl-3.0 | Python | |
ea3dca7a2fb203d639b2eba74f21f95b24fecfbc | Create sdfdfdf.py (#13) | sajjadelastica/3G45,sajjadelastica/3G45,sajjadelastica/3G45,sajjadelastica/3G45 | sdfdfdf.py | sdfdfdf.py | efefefdsf
| apache-2.0 | Python | |
a328b1be6b90d2faa5fa717ffd8515115e1775dd | Add unit tests for tenant_usages_client | rakeshmi/tempest,sebrandon1/tempest,Juniper/tempest,vedujoshi/tempest,zsoltdudas/lis-tempest,LIS/lis-tempest,flyingfish007/tempest,dkalashnik/tempest,vedujoshi/tempest,masayukig/tempest,cisco-openstack/tempest,Tesora/tesora-tempest,bigswitch/tempest,tonyli71/tempest,cisco-openstack/tempest,zsoltdudas/lis-tempest,rakeshmi/tempest,sebrandon1/tempest,dkalashnik/tempest,Tesora/tesora-tempest,xbezdick/tempest,flyingfish007/tempest,tonyli71/tempest,bigswitch/tempest,LIS/lis-tempest,pczerkas/tempest,openstack/tempest,Juniper/tempest,pczerkas/tempest,izadorozhna/tempest,openstack/tempest,izadorozhna/tempest,masayukig/tempest,xbezdick/tempest | tempest/tests/services/compute/test_tenant_usages_client.py | tempest/tests/services/compute/test_tenant_usages_client.py | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import tenant_usages_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestTenantUsagesClient(base.TestCase):
    """Unit tests for TenantUsagesClient against a mocked ServiceClient.get."""

    FAKE_SERVER_USAGES = [{
        "ended_at": None,
        "flavor": "m1.tiny",
        "hours": 1.0,
        "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8fe0",
        "local_gb": 1,
        "memory_mb": 512,
        "name": "new-server-test",
        "started_at": "2012-10-08T20:10:44.541277",
        "state": "active",
        "tenant_id": "openstack",
        "uptime": 3600,
        "vcpus": 1
    }]

    FAKE_TENANT_USAGES = [{
        "server_usages": FAKE_SERVER_USAGES,
        "start": "2012-10-08T21:10:44.587336",
        "stop": "2012-10-08T22:10:44.587336",
        "tenant_id": "openstack",
        "total_hours": 1,
        "total_local_gb_usage": 1,
        "total_memory_mb_usage": 512,
        "total_vcpus_usage": 1
    }]

    def setUp(self):
        super(TestTenantUsagesClient, self).setUp()
        fake_auth = fake_auth_provider.FakeAuthProvider()
        self.client = tenant_usages_client.TenantUsagesClient(
            fake_auth, 'compute', 'regionOne')

    def _patch_get(self, body, bytes_body=False):
        """Patch ServiceClient.get to return *body* as a 200 JSON response.

        Extracted from the two _test_* helpers below, which previously
        duplicated this serialization/mocking boilerplate.
        """
        serialized_body = json.dumps(body)
        if bytes_body:
            serialized_body = serialized_body.encode('utf-8')
        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
        self.useFixture(mockpatch.Patch(
            'tempest.common.service_client.ServiceClient.get',
            return_value=mocked_resp))

    def _test_list_tenant_usages(self, bytes_body=False):
        expected = {"tenant_usages": self.FAKE_TENANT_USAGES}
        self._patch_get(expected, bytes_body)
        resp = self.client.list_tenant_usages()
        self.assertEqual(expected, resp)

    def test_list_tenant_usages_with_str_body(self):
        self._test_list_tenant_usages()

    def test_list_tenant_usages_with_bytes_body(self):
        self._test_list_tenant_usages(bytes_body=True)

    def _test_show_tenant_usage(self, bytes_body=False):
        expected = {"tenant_usage": self.FAKE_TENANT_USAGES[0]}
        self._patch_get(expected, bytes_body)
        resp = self.client.show_tenant_usage('openstack')
        self.assertEqual(expected, resp)

    def test_show_tenant_usage_with_str_body(self):
        self._test_show_tenant_usage()

    def test_show_tenant_usage_with_bytes_body(self):
        self._test_show_tenant_usage(bytes_body=True)
| apache-2.0 | Python | |
9f6572a9ea20cdeaeba93c4029b5409966d25535 | add a reboot example | diydrones/dronekit-python,dronekit/dronekit-python,hamishwillee/dronekit-python,diydrones/dronekit-python,dronekit/dronekit-python,hamishwillee/dronekit-python | examples/reboot/reboot.py | examples/reboot/reboot.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from dronekit import connect
import time
# Set up option parsing to get connection string
import argparse
parser = argparse.ArgumentParser(description='Reboots vehicle')
parser.add_argument('--connect',
help="Vehicle connection target string. If not specified, SITL automatically started and used.")
args = parser.parse_args()
connection_string = args.connect
sitl = None
# Start SITL if no connection string specified
if not connection_string:
import dronekit_sitl
sitl = dronekit_sitl.start_default()
connection_string = sitl.connection_string()
# Connect to the Vehicle
print('Connecting to vehicle on: %s' % connection_string)
vehicle = connect(connection_string, wait_ready=True)
vehicle.reboot()
time.sleep(1)
# Shut down simulator if it was started.
if sitl:
sitl.stop()
| apache-2.0 | Python | |
902473ff170fffb635cf73e742f94e352994e954 | Create FetchNCBIseq.py | minesh1291/Sequence-Utilities,minesh1291/Sequence-Utilities | Online/FetchNCBIseq.py | Online/FetchNCBIseq.py | ### Fetch Genomics Sequence Using Coordinates In Biopython
from Bio import Entrez, SeqIO
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.efetch(db="nucleotide",
id="307603377",
rettype="fasta",
strand=1,
seq_start=4000100,
seq_stop=4000200)
record = SeqIO.read(handle, "fasta")
handle.close()
print record.seq
#db - database
#id - GI
#strand - what strand of DNA to show (1 = plus or 2 = minus)
#seq_start - show sequence starting from this base number
#seq_stop - show sequence ending on this base number
| apache-2.0 | Python | |
067371d1baeae5f0502480738233c340de5a8033 | Create allAtomNetwork.py | hua372494277/protein-contact-maps | allAtomNetwork.py | allAtomNetwork.py | # calculate the clustering coefficient, characteristic path length, entropy, assortativity coefficient, Diameter and Radius
# of the network based on the coordinate of all atoms
import math
import networkx as nx
def distance(node1, node2):
    """Minimum Euclidean distance between any atom of node1 and any atom of node2.

    Each node maps an atom number to a {'x': ..., 'y': ..., 'z': ...} dict.
    Returns 0.0 when either node has no atoms (preserving the original
    behaviour for empty input).
    """
    best = None
    for atom1 in node1.values():
        for atom2 in node2.values():
            d = math.sqrt((atom1['x'] - atom2['x']) ** 2 +
                          (atom1['y'] - atom2['y']) ** 2 +
                          (atom1['z'] - atom2['z']) ** 2)
            # Bug fix: the original used 0.0 as a "not set" sentinel
            # (`if dist == 0.0 or dist > distC`), so a genuine zero distance
            # could be overwritten by a later, larger one.
            if best is None or d < best:
                best = d
    return 0.0 if best is None else best
# Input/output locations (hard-coded Windows paths).
coordinateP = r'D:\NextIdea\allAtomCoordinate'
fr = open(r'D:\NextIdea\Dataset\scopClassHelixSheetpercentageNew.csv')
fw = open(r'D:\NextIdea\Dataset\allatomNetworkAddProperties.csv','w')
G = nx.Graph()
countL = 0
# One CSV row per protein: build its residue contact network from all-atom
# coordinates and append the computed graph metrics to the output file.
while True:
    countL += 1
    print(countL)
    line = fr.readline()
    if not line:
        break
    line = line.rstrip()
    items = line.split(',')
    pdbname = items[0]
    #all atoms' coordinates:
    # coordinate[residue_number][atom_number] -> {'x', 'y', 'z'}
    frCoordinate = open( coordinateP + '\\' + pdbname + 'AllAtoms')
    coordinate = {}
    while True:
        lineCoordinate = frCoordinate.readline()
        if not lineCoordinate:
            break
        itemsC = lineCoordinate.split()
        numR = int(itemsC[1])
        coordinate.setdefault(numR,{})
        coordinate[numR].setdefault(int(itemsC[0]),{})
        coordinate[numR][int(itemsC[0])]['x'] = float(itemsC[-3])
        coordinate[numR][int(itemsC[0])]['y'] = float(itemsC[-2])
        coordinate[numR][int(itemsC[0])]['z'] = float(itemsC[-1])
    frCoordinate.close()
    # Connect two residues when their closest inter-atom distance is <= 5.0.
    for i in coordinate:
        for j in coordinate:
            if i < j:
                dis = distance(coordinate[i], coordinate[j])
                if dis <= 5.0 :
                    G.add_edge(i,j)#, weight = distance)
    # Debug aid: if the network is disconnected, show each component and
    # pause (input() blocks until Enter); the metrics below would fail on a
    # disconnected graph.
    if not nx.is_connected(G):
        scc = nx.connected_component_subgraphs(G)
        for sub in scc:
            input(sub.nodes())
    # Write assortativity, diameter and radius for this protein's network.
    degreeAssor = nx.degree_assortativity_coefficient(G)
    #print(degreeAssor)
    fw.write(str(degreeAssor) + ',')
    diameter = nx.diameter(G)
    #print(diameter)
    fw.write(str(diameter) + ',')
    radius = nx.radius(G)
    #input(radius)
    fw.write(str(radius) + '\n')
    # Disabled random-graph baseline (clustering / path length / entropy):
##    clusteringc = 0.0
##    averageLength = 0.0
##    entropy = 0
##    for ii in range(0, 10):
##        while True:
##            Gtest = nx.gnm_random_graph(G.number_of_nodes(), G.number_of_edges())
##            if nx.is_connected(Gtest):
##                break
##
##        clusteringc += nx.average_clustering(Gtest)
##        averageLength += nx.average_shortest_path_length(Gtest)
##
##        degreeSeq = nx.degree_histogram(Gtest)
##
##        for degreeI in range(0,len(degreeSeq)):
##            percent = degreeSeq[degreeI]/nodeNum
##            if percent > 0:
##                entropy -= percent * math.log( percent, 2)
##
##        Gtest.clear()
##
##
##    fw.write(str(entropy/10.0) + ',')
##    fw.write(str(clusteringc/10.0) + ',')
##    fw.write(str(averageLength/10.0) + '\n')
    # NOTE(review): frCoordinate was already closed above; this second close
    # is redundant but harmless.
    frCoordinate.close()
    G.clear()
fr.close()
fw.close()
| bsd-3-clause | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.