commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
623115b7cb26c6402479845dd96e69c613ad4b98 | Create easy_23_DashInsert.py | GabrielGhe/CoderbyteChallenges,GabrielGhe/CoderbyteChallenges | easy_23_DashInsert.py | easy_23_DashInsert.py | def odd(ch):
return ch in '13579'
##############################
# Inserts dashes between odd #
# digits #
##############################
def DashInsert(num):
result = []
prev = ' '
for curr in str(num):
if odd(prev) and odd(curr):
result.append('-')
result.append(curr)
prev = curr
return ''.join(result)
print DashInsert(raw_input())
| mit | Python | |
bfe073671910efdd932b92c2bb40dc24c230733a | fix migrations | DMPwerkzeug/DMPwerkzeug,DMPwerkzeug/DMPwerkzeug,rdmorganiser/rdmo,rdmorganiser/rdmo,rdmorganiser/rdmo,DMPwerkzeug/DMPwerkzeug | apps/domain/migrations/0024_meta.py | apps/domain/migrations/0024_meta.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-11-14 12:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('domain', '0023_fix_label'),
]
operations = [
migrations.AlterModelManagers(
name='attribute',
managers=[
],
),
migrations.AlterModelManagers(
name='attributeentity',
managers=[
],
),
]
| apache-2.0 | Python | |
b35dc73429d8625b298017625b4521a2f3a00eea | Add testing module | tommyogden/maxwellbloch,tommyogden/maxwellbloch | maxwellbloch/testing.py | maxwellbloch/testing.py | # -*- coding: utf-8 -*-
import nose
def run():
"""
Run all tests with nose.
"""
# runs tests in maxwellbloch.tests module
nose.run(defaultTest="maxwellbloch.tests", argv=['nosetests', '-v'])
| mit | Python | |
cc04592ea5ea15944f668928d5b8e6f7d8e257a1 | Update prefix-and-suffix-search.py | kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015 | Python/prefix-and-suffix-search.py | Python/prefix-and-suffix-search.py | # Time: ctor: O(w * l), w is the number of words, l is the word length on average
# search: O(m + n), m is the number of prefix match, n is the number of suffix match
# Space: O(w * l)
class Trie(object):
def __init__(self):
_trie = lambda: collections.defaultdict(_trie)
self.__trie = _trie()
def insert(self, word, i):
def add_word(cur, i):
if "_words" not in cur:
cur["_words"] = []
cur["_words"].append(i)
cur = self.__trie
add_word(cur, i)
for c in word:
cur = cur[c]
add_word(cur, i)
def find(self, word):
cur = self.__trie
for c in word:
if c not in cur:
return []
cur = cur[c]
return cur["_words"]
class WordFilter(object):
def __init__(self, words):
"""
:type words: List[str]
"""
self.__prefix_trie = Trie()
self.__suffix_trie = Trie()
for i in reversed(xrange(len(words))):
self.__prefix_trie.insert(words[i], i)
self.__suffix_trie.insert(words[i][::-1], i)
def f(self, prefix, suffix):
"""
:type prefix: str
:type suffix: str
:rtype: int
"""
prefix_match = self.__prefix_trie.find(prefix)
suffix_match = self.__suffix_trie.find(suffix[::-1])
i, j = 0, 0
while i != len(prefix_match) and j != len(suffix_match):
if prefix_match[i] == suffix_match[j]:
return prefix_match[i]
elif prefix_match[i] > suffix_match[j]:
i += 1
else:
j += 1
return -1
# Your WordFilter object will be instantiated and called as such:
# obj = WordFilter(words)
# param_1 = obj.f(prefix,suffix)
| # Time: ctor: O(w * l), l is the word length on average
# search: O(m + n), m is the number of prefix match, n is the number of suffix match
# Space: O(w * l), w is the number of words
class Trie(object):
def __init__(self):
_trie = lambda: collections.defaultdict(_trie)
self.__trie = _trie()
def insert(self, word, i):
def add_word(cur, i):
if "_words" not in cur:
cur["_words"] = []
cur["_words"].append(i)
cur = self.__trie
add_word(cur, i)
for c in word:
cur = cur[c]
add_word(cur, i)
def find(self, word):
cur = self.__trie
for c in word:
if c not in cur:
return []
cur = cur[c]
return cur["_words"]
class WordFilter(object):
def __init__(self, words):
"""
:type words: List[str]
"""
self.__prefix_trie = Trie()
self.__suffix_trie = Trie()
for i in reversed(xrange(len(words))):
self.__prefix_trie.insert(words[i], i)
self.__suffix_trie.insert(words[i][::-1], i)
def f(self, prefix, suffix):
"""
:type prefix: str
:type suffix: str
:rtype: int
"""
prefix_match = self.__prefix_trie.find(prefix)
suffix_match = self.__suffix_trie.find(suffix[::-1])
i, j = 0, 0
while i != len(prefix_match) and j != len(suffix_match):
if prefix_match[i] == suffix_match[j]:
return prefix_match[i]
elif prefix_match[i] > suffix_match[j]:
i += 1
else:
j += 1
return -1
# Your WordFilter object will be instantiated and called as such:
# obj = WordFilter(words)
# param_1 = obj.f(prefix,suffix)
| mit | Python |
6718e97b23d67dda6e67cda8226030edd90f7fbd | add env.py for the migrations | hypebeast/etapi,hypebeast/etapi,hypebeast/etapi,hypebeast/etapi | migrations/env.py | migrations/env.py | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
config.set_main_option('sqlalchemy.url', current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| bsd-3-clause | Python | |
cc3b29aaa2c0ffa3cde6b901bf4bdf3ce3fb4345 | Add code for pulling pitcher stats for specified date range | jldbc/pybaseball | pybaseball/league_pitching_stats.py | pybaseball/league_pitching_stats.py | import requests
import pandas as pd
from bs4 import BeautifulSoup
def get_soup(start_dt, end_dt):
# get most recent standings if date not specified
if((start_dt is None) or (end_dt is None)):
print('Error: a date range needs to be specified')
return None
url = "http://www.baseball-reference.com/leagues/daily.cgi?user_team=&bust_cache=&type=p&lastndays=7&dates=fromandto&fromandto={}.{}&level=mlb&franch=&stat=&stat_value=0".format(start_dt, end_dt)
s=requests.get(url).content
return BeautifulSoup(s, "html.parser")
def get_table(soup):
table = soup.find_all('table')[0]
data = []
headings = [th.get_text() for th in table.find("tr").find_all("th")][1:]
data.append(headings)
table_body = table.find('tbody')
rows = table_body.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
data.append([ele for ele in cols if ele])
data = pd.DataFrame(data)
data = data.rename(columns=data.iloc[0])
data = data.reindex(data.index.drop(0))
return data
def pitching_stats(start_dt=None, end_dt=None):
# retrieve html from baseball reference
soup = get_soup(start_dt, end_dt)
table = get_table(soup)
return table
| mit | Python | |
bd60a99d832d839d7535a5232453afa807d6e3ee | Create __init__.py | llamafarmer/Pi_Weather_Station,llamafarmer/Pi_Weather_Station,llamafarmer/Pi_Weather_Station | Pi_Weather_Station/__init__.py | Pi_Weather_Station/__init__.py | mit | Python | ||
f60123ea933cba6b57214ad335b244b48cc65fdf | Create valid-tic-tac-toe-state.py | tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015 | Python/valid-tic-tac-toe-state.py | Python/valid-tic-tac-toe-state.py | # Time: O(1)
# Space: O(1)
# A Tic-Tac-Toe board is given as a string array board. Return True
# if and only if it is possible to reach this board position
# during the course of a valid tic-tac-toe game.
#
# The board is a 3 x 3 array, and consists of characters " ", "X",
# and "O". The " " character represents an empty square.
#
# Here are the rules of Tic-Tac-Toe:
# - Players take turns placing characters into empty squares (" ").
# - The first player always places "X" characters, while the second player always places "O" characters.
# - "X" and "O" characters are always placed into empty squares, never filled ones.
# - The game ends when there are 3 of the same (non-empty) character filling any row, column, or diagonal.
# - The game also ends if all squares are non-empty.
# - No more moves can be played if the game is over.
#
# Example 1:
# Input: board = ["O ", " ", " "]
# Output: false
# Explanation: The first player always plays "X".
#
# Example 2:
# Input: board = ["XOX", " X ", " "]
# Output: false
# Explanation: Players take turns making moves.
#
# Example 3:
# Input: board = ["XXX", " ", "OOO"]
# Output: false
#
# Example 4:
# Input: board = ["XOX", "O O", "XOX"]
# Output: true
#
# Note:
# - board is a length-3 array of strings, where each string board[i] has length 3.
# - Each board[i][j] is a character in the set {" ", "X", "O"}.
class Solution(object):
def validTicTacToe(self, board):
"""
:type board: List[str]
:rtype: bool
"""
def win(board, player):
for i in xrange(3):
if all(board[i][j] == player for j in xrange(3)):
return True
if all(board[j][i] == player for j in xrange(3)):
return True
return (player == board[1][1] == board[0][0] == board[2][2] or \
player == board[1][1] == board[0][2] == board[2][0])
FIRST, SECOND = ('X', 'O')
x_count = sum(row.count(FIRST) for row in board)
o_count = sum(row.count(SECOND) for row in board)
if o_count not in {x_count-1, x_count}: return False
if win(board, FIRST) and x_count-1 != o_count: return False
if win(board, SECOND) and x_count != o_count: return False
return True
| mit | Python | |
ee7a48da3ef6486c3650f9bcc1f4b59c59642adc | Add unittest-based PyDbLite test | dkkline/PyLiterDB | PyDbLite/test/test_pydblite.py | PyDbLite/test/test_pydblite.py | # -*- coding: iso-8859-1 -*-
import datetime
import unittest
import random
import os
import sys
sys.path.insert(0,os.path.dirname(os.getcwd()))
import PyDbLite
db = None
vals1 = [('simon',datetime.date(1984,8,17),26)]
vals2 = [('camille',datetime.date(1986,12,12),24),
('jean',datetime.date(1989,6,12),21),('florence',datetime.date(1994,1,14),17),
('marie-anne',datetime.date(1999,1,28),12)]
vals3 = [('',datetime.date(2000,10,10),55)]
class TestFunctions(unittest.TestCase):
def test_00_init(self):
global db
db = PyDbLite.Base('test.pdl')
db.create('name','birth','age',mode="override")
def test_01_insert(self):
for i,val in enumerate(vals1+vals2+vals3):
assert db.insert(*val)==i
assert len(db)==len(vals1+vals2+vals3)
def test_10_select(self):
for i,v in enumerate(vals1):
rec = db[i]
for j,field in enumerate(db.fields):
assert rec[field]==v[j]
def test_11_select(self):
assert db(name='foo')==[]
assert db(name='')[0]['birth']==datetime.date(2000,10,10)
def test_12_iter(self):
self.assertEqual(len([x for x in db]),len(db))
for val in vals1+vals2+vals3:
self.assertEqual([ x for x in db if x['name']==val ],db(name=val))
self.assertEqual([ x for x in db if x['birth']==val ],db(birth=val))
self.assertEqual([ x for x in db if x['age']==val ],db(age=val))
def test_30_update(self):
for record in db:
db.update(record,name=record['name'].capitalize())
self.assertEqual(db[0]['name'],"Simon")
#self.assertEqual(db[5]['name'][0],"")
def test_40_delete(self):
del db[0]
self.assertEqual(db(name='Simon'),[])
self.assertEqual(len(db),len(vals1+vals2+vals3)-1)
if __name__=="__main__":
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestFunctions))
unittest.TextTestRunner().run(suite)
| bsd-3-clause | Python | |
403c724ffd9dab4ebdf3a58e02406969ed7a9fcb | Create front_back.py | dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey | Python/CodingBat/front_back.py | Python/CodingBat/front_back.py | # http://codingbat.com/prob/p153599
def front_back(str):
if len(str) <= 1:
return str
return str[len(str)-1] + str[1:-1] + str[0]
| mit | Python | |
4a48b9998961be268cbfe64726ea78f68cedce39 | Create not_string.py | dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey | Python/CodingBat/not_string.py | Python/CodingBat/not_string.py | # http://codingbat.com/prob/p189441
def not_string(str):
if str.startswith("not"):
return str
else:
return "not " + str
| mit | Python | |
2fc62908b2f0074a0e82a120809b80cb3e009999 | add __init__.py for distro | sassoftware/mint,sassoftware/mint,sassoftware/mint,sassoftware/mint,sassoftware/mint | mint/distro/__init__.py | mint/distro/__init__.py | #
# Copyright (c) 2005 Specifix, Inc.
#
# All rights reserved
#
| apache-2.0 | Python | |
976f7b4239a1ff21d0748f43e8224017084118b7 | make neuroimaging.visualization.tests into a package | alexis-roche/niseg,arokem/nipy,nipy/nireg,alexis-roche/nipy,arokem/nipy,arokem/nipy,alexis-roche/register,bthirion/nipy,bthirion/nipy,alexis-roche/niseg,alexis-roche/nipy,bthirion/nipy,alexis-roche/nipy,nipy/nipy-labs,nipy/nireg,bthirion/nipy,nipy/nipy-labs,alexis-roche/nireg,alexis-roche/register,alexis-roche/nireg,alexis-roche/register,arokem/nipy,alexis-roche/nipy | lib/visualization/tests/__init__.py | lib/visualization/tests/__init__.py | import test_visualization
import unittest
def suite():
return unittest.TestSuite([test_visualization.suite()])
| bsd-3-clause | Python | |
62987e89e08818749a40c18fa329562146b4f761 | Watch trigger added | Uname-a/knife_scraper,Uname-a/knife_scraper,Uname-a/knife_scraper | wow_watches.py | wow_watches.py | #!/usr/bin/env python
# bhq_query.py - module for sopel to query blade head quarters site for knife data
#
# Copyright (c) 2015 Casey Bartlett <caseytb@bu.edu>
#
# See LICENSE for terms of usage, modification and redistribution.
from sopel import *
@module.commands('wtc')
def knife(bot, trigger):
bot.reply("Look I respond to the wtc command now!")
| mit | Python | |
11d42f7789ae3f0a020087b52389af9c98d07901 | add barebones mapper module | cizra/pycat,cizra/pycat | modules/mapper.py | modules/mapper.py | from modules.basemodule import BaseModule
import mapper.libmapper
import pprint
import re
import time
class Mapper(BaseModule):
def __init__(self, mud, mapfname='default.map'):
self.mapfname = mapfname
try:
with open(self.mapfname, 'r') as f:
ser = f.read()
self.m = mapper.libmapper.Map(ser)
except FileNotFoundError:
self.m = mapper.libmapper.Map()
super().__init__(mud)
def quit(self):
with open(self.mapfname, 'w') as f:
f.write(self.m.serialize())
self.log("Serialized map to ", self.mapfname)
def current(self):
return self.gmcp['room']['info']['num']
def path(self, there):
here = self.current()
if here == there:
self.log("Already there!")
return ''
then = time.time()
path = self.m.findPath(here, there)
self.log("{} (found in {} seconds)".format(path, time.time() - then))
return path
def alias(self, line):
words = line.split(' ')
if words[0] != '#map':
return
cmd = words[1]
if cmd == 'here':
here = self.current()
self.log('\n' + pprint.pformat({
'num': here,
'name': self.m.getRoomName(here),
'zone': self.m.getRoomZone(here),
'terrain': self.m.getRoomTerrain(here),
'coords': self.m.getRoomCoords(here),
'exits': self.m.getRoomExits(here),
}))
return True
elif re.match(r'#map path ([^ ]+)', line):
there = int(re.match(r'#map path ([^ ]+)', line).group(1))
self.log(self.path(there))
return True
elif re.match(r'#map go ([^ ])', line):
there = int(re.match(r'#map path ([^ ]+)', line).group(1))
self.send(self.path(there))
return True
def trigger(self, raw, stripped):
pass
def handleGmcp(self, cmd, value):
# room.info {'details': '', 'id': 'Homes#1226', 'terrain': 'cave', 'exits': {'N': -565511209}, 'coord': {'id': 0, 'x': -1, 'cont': 0, 'y': -1}, 'desc': '', 'num': -565511180, 'name': 'An empty room', 'zone': 'Homes'}
# {'coord': {'cont': 0, 'id': 0, 'x': -1, 'y': -1},
# 'desc': '',
# 'details': '',
# 'exits': {'N': -565511209},
# 'id': 'Homes#1226',
# 'name': 'An empty room',
# 'num': -565511180,
# 'terrain': 'cave',
# 'zone': 'Homes'}
if cmd == 'room.info':
id = value['num']
name = value['name']
zone = value['zone']
terrain = value['terrain']
exits = {}
for k, v in value['exits'].items():
exits[k.lower()] = v
self.m.addRoom(id, name, zone, terrain, exits)
| unlicense | Python | |
b58d7ae6b9887b326ba485ce885deb9c03054801 | Create Factorial_of_a_number.py | Jeevan-J/Python_Funcode | Python3-5/Factorial_of_a_number.py | Python3-5/Factorial_of_a_number.py | #Write a program which can compute the factorial of a given numbers.
#We will first define a function
def fact(x): #Define a function named 'fact()'
if x == 0: #We directly return 1 if input number is 0.
return 1 ;
return x * fact(x - 1); # We return 'number * fact(number - 1)'. We are calling the same function again in its own function, This is called Recursive Function
x=int(input("Please enter a number:"));
print fact(x);
| bsd-2-clause | Python | |
a9f6caf863b5c3156c3200d33a6cdc29f0c2ad23 | Add new py-hacking package (#14027) | iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack | var/spack/repos/builtin/packages/py-hacking/package.py | var/spack/repos/builtin/packages/py-hacking/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyHacking(PythonPackage):
"""OpenStack Hacking Guideline Enforcement."""
homepage = "https://docs.openstack.org/hacking/latest/"
url = "https://pypi.io/packages/source/h/hacking/hacking-1.1.0.tar.gz"
import_modules = ['hacking']
version('1.1.0', sha256='23a306f3a1070a4469a603886ba709780f02ae7e0f1fc7061e5c6fb203828fee')
depends_on('py-setuptools', type='build')
| lgpl-2.1 | Python | |
e6641065af9078e2e50e99f657aa605d837d3976 | add new package (#20112) | LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/py-vcstool/package.py | var/spack/repos/builtin/packages/py-vcstool/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyVcstool(PythonPackage):
"""vcstool enables batch commands on multiple different vcs repositories.
Currently it supports git, hg, svn and bzr."""
homepage = "https://github.com/dirk-thomas/vcstool"
url = "https://pypi.io/packages/source/v/vcstool/vcstool-0.2.15.tar.gz"
version('0.2.15', sha256='b1fce6fcef7b117b245a72dc8658a128635749d01dc7e9d1316490f89f9c2fde')
depends_on('py-pyyaml', type=('build', 'run'))
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-argparse', when='^python@:2.6', type=('build', 'run'))
| lgpl-2.1 | Python | |
701f6a06b8405620905a67b47c5702c100a1447a | Check to make sure the input file is sorted | hms-dbmi/clodius,hms-dbmi/clodius | scripts/check_sorted.py | scripts/check_sorted.py | import sys
prev_val = 0
prev_val2 = 0
counter = 0
for line in sys.stdin:
parts = line.split()
curr_val = int(parts[0])
curr_val2 = int(parts[1])
val1 = int(parts[0])
val2 = int(parts[1])
if val1 > val2:
print >>sys.stderr, "Not triangular:", counter
sys.exit(1)
if curr_val < prev_val:
print >>sys.stderr, "Not sorted, line:", counter
sys.exit(1)
elif curr_val == prev_val:
if (curr_val2 < prev_val2):
print >>sys.stderr, "Not sorted, line:", counter
sys.exit(1)
prev_val = curr_val
prev_val2 = curr_val2
counter += 1
if counter % 1000000 == 0:
print "counter:", counter, prev_val, curr_val
| mit | Python | |
0aa5466be1ba678f0428e825def010a5007059c7 | Modify tests to show Unicode handling regression | hashamali/pyScss,Kronuz/pyScss,hashamali/pyScss,cpfair/pyScss,Kronuz/pyScss,hashamali/pyScss,Kronuz/pyScss,Kronuz/pyScss,cpfair/pyScss,cpfair/pyScss | scss/tests/test_misc.py | scss/tests/test_misc.py | # -*- encoding: utf-8 -*-
"""Tests for miscellaneous features that should maybe be broken out into their
own files, maybe.
"""
from scss import Scss
def test_super_selector():
compiler = Scss(scss_opts=dict(style='expanded'))
input = """\
foo, bar {
a: b;
}
baz {
c: d;
}
"""
expected = """\
super foo, super bar {
a: b;
}
super baz {
c: d;
}
"""
output = compiler.compile(input, super_selector='super')
assert expected == output
def test_debug_info():
# nb: debug info doesn't work if the source isn't a file
compiler = Scss(scss_opts=dict(style='expanded', debug_info=True))
compiler._scss_files = {}
compiler._scss_files['input.css'] = """\
div {
color: green;
}
table {
color: red;
}
"""
expected = """\
@media -sass-debug-info{filename{font-family:file\:\/\/input\.css}line{font-family:\\000031}}
div {
color: green;
}
@media -sass-debug-info{filename{font-family:file\:\/\/input\.css}line{font-family:\\000034}}
table {
color: red;
}
"""
output = compiler.compile()
assert expected == output
def test_live_errors():
compiler = Scss(live_errors=True)
output = compiler.compile("""$foo: unitless(one);""")
assert "body:before" in output
assert "TypeError: Expected" in output
def test_extend_across_files():
compiler = Scss(scss_opts=dict(compress=0))
compiler._scss_files = {}
compiler._scss_files['first.css'] = '''
@option style:legacy, short_colors:yes, reverse_colors:yes;
.specialClass extends .basicClass {
padding: 10px;
font-size: 14px;
}
'''
compiler._scss_files['second.css'] = '''
@option style:legacy, short_colors:yes, reverse_colors:yes;
.basicClass {
padding: 20px;
background-color: #FF0000;
}
'''
actual = compiler.compile()
expected = """\
.basicClass, .specialClass {
padding: 20px;
background-color: #FF0000;
}
.specialClass {
padding: 10px;
font-size: 14px;
}
"""
assert expected == actual
def test_unicode_files():
compiler = Scss(scss_opts=dict(style='expanded'))
unicode_input = u"""q {
quotes: "“" "”" "‘" "’";
content: "•";
}
"""
output = compiler.compile(unicode_input)
assert output == unicode_input
| # -*- encoding: utf-8 -*-
"""Tests for miscellaneous features that should maybe be broken out into their
own files, maybe.
"""
from scss import Scss
def test_super_selector():
compiler = Scss(scss_opts=dict(style='expanded'))
input = """\
foo, bar {
a: b;
}
baz {
c: d;
}
"""
expected = """\
super foo, super bar {
a: b;
}
super baz {
c: d;
}
"""
output = compiler.compile(input, super_selector='super')
assert expected == output
def test_debug_info():
# nb: debug info doesn't work if the source isn't a file
compiler = Scss(scss_opts=dict(style='expanded', debug_info=True))
compiler._scss_files = {}
compiler._scss_files['input.css'] = """\
div {
color: green;
}
table {
color: red;
}
"""
expected = """\
@media -sass-debug-info{filename{font-family:file\:\/\/input\.css}line{font-family:\\000031}}
div {
color: green;
}
@media -sass-debug-info{filename{font-family:file\:\/\/input\.css}line{font-family:\\000034}}
table {
color: red;
}
"""
output = compiler.compile()
assert expected == output
def test_live_errors():
compiler = Scss(live_errors=True)
output = compiler.compile("""$foo: unitless(one);""")
assert "body:before" in output
assert "TypeError: Expected" in output
def test_extend_across_files():
compiler = Scss(scss_opts=dict(compress=0))
compiler._scss_files = {}
compiler._scss_files['first.css'] = '''
@option style:legacy, short_colors:yes, reverse_colors:yes;
.specialClass extends .basicClass {
padding: 10px;
font-size: 14px;
}
'''
compiler._scss_files['second.css'] = '''
@option style:legacy, short_colors:yes, reverse_colors:yes;
.basicClass {
padding: 20px;
background-color: #FF0000;
}
'''
actual = compiler.compile()
expected = """\
.basicClass, .specialClass {
padding: 20px;
background-color: #FF0000;
}
.specialClass {
padding: 10px;
font-size: 14px;
}
"""
assert expected == actual
def test_unicode_files():
compiler = Scss(scss_opts=dict(style='expanded'))
unicode_input = u"""q {
quotes: "“" "”" "‘" "’";
}
"""
output = compiler.compile(unicode_input)
assert output == unicode_input
| mit | Python |
b2532cfeb3541a64143ded6d86b635e2c9049080 | Clean up some pylint warnings | linas/link-grammar,opencog/link-grammar,ampli/link-grammar,ampli/link-grammar,ampli/link-grammar,MadBomber/link-grammar,opencog/link-grammar,opencog/link-grammar,opencog/link-grammar,MadBomber/link-grammar,opencog/link-grammar,ampli/link-grammar,ampli/link-grammar,linas/link-grammar,opencog/link-grammar,MadBomber/link-grammar,MadBomber/link-grammar,linas/link-grammar,MadBomber/link-grammar,linas/link-grammar,opencog/link-grammar,ampli/link-grammar,MadBomber/link-grammar,MadBomber/link-grammar,opencog/link-grammar,linas/link-grammar,ampli/link-grammar,ampli/link-grammar,linas/link-grammar,linas/link-grammar,ampli/link-grammar,MadBomber/link-grammar,linas/link-grammar,opencog/link-grammar,linas/link-grammar | bindings/python-examples/example.py | bindings/python-examples/example.py | #! /usr/bin/env python
# -*- coding: utf8 -*-
#
# Link Grammar example usage
#
import locale
from linkgrammar import Sentence, ParseOptions, Dictionary
# from linkgrammar import _clinkgrammar as clg
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
po = ParseOptions()
def desc(lkg):
print lkg.diagram()
print 'Postscript:'
print lkg.postscript()
print '---'
# English is the default language
sent = Sentence("This is a test.", Dictionary(), po)
linkages = sent.parse()
print "English: found ", sent.num_valid_linkages(), "linkages"
for linkage in linkages:
desc(linkage)
# Russian
sent = Sentence("это большой тест.", Dictionary('ru'), po)
linkages = sent.parse()
print "Russian: found ", sent.num_valid_linkages(), "linkages"
for linkage in linkages:
desc(linkage)
# Turkish
sent = Sentence("çok şişman adam geldi", Dictionary('tr'), po)
linkages = sent.parse()
print "Turkish: found ", sent.num_valid_linkages(), "linkages"
for linkage in linkages:
desc(linkage)
| #! /usr/bin/env python
# -*- coding: utf8 -*-
#
# Link Grammar example usage
#
import locale
from linkgrammar import Sentence, ParseOptions, Dictionary
# from linkgrammar import _clinkgrammar as clg
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
po = ParseOptions()
def desc(linkage):
print linkage.diagram()
print 'Postscript:'
print linkage.postscript()
print '---'
# English is the default language
sent = Sentence("This is a test.", Dictionary(), po)
linkages = sent.parse()
print "English: found ", sent.num_valid_linkages(), "linkages"
for linkage in linkages:
desc(linkage)
# Russian
sent = Sentence("это большой тест.", Dictionary('ru'), po)
linkages = sent.parse()
print "Russian: found ", sent.num_valid_linkages(), "linkages"
for linkage in linkages:
desc(linkage)
# Turkish
sent = Sentence("çok şişman adam geldi", Dictionary('tr'), po)
linkages = sent.parse()
print "Turkish: found ", sent.num_valid_linkages(), "linkages"
for linkage in linkages:
desc(linkage)
| lgpl-2.1 | Python |
39473b1aa0d8c54b0fb43b5e97545596ed087d59 | Create set-intersection-size-at-least-two.py | tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015 | Python/set-intersection-size-at-least-two.py | Python/set-intersection-size-at-least-two.py | # Time: O(nlogn)
# Space: O(n)
# An integer interval [a, b] (for integers a < b) is a set of all consecutive integers from a to b,
# including a and b.
#
# Find the minimum size of a set S such that for every integer interval A in intervals,
# the intersection of S with A has size at least 2.
#
# Example 1:
# Input: intervals = [[1, 3], [1, 4], [2, 5], [3, 5]]
# Output: 3
# Explanation:
# Consider the set S = {2, 3, 4}. For each interval, there are at least 2 elements from S in the interval.
# Also, there isn't a smaller size set that fulfills the above condition.
# Thus, we output the size of this set, which is 3.
#
# Example 2:
# Input: intervals = [[1, 2], [2, 3], [2, 4], [4, 5]]
# Output: 5
# Explanation:
# An example of a minimum sized set is {1, 2, 3, 4, 5}.
#
# Note:
# intervals will have length in range [1, 3000].
# intervals[i] will have length 2, representing some integer interval.
# intervals[i][j] will be an integer in [0, 10^8].
# greedy solution
class Solution(object):
def intersectionSizeTwo(self, intervals):
"""
:type intervals: List[List[int]]
:rtype: int
"""
intervals.sort(key = lambda(s, e): (s, -e))
cnts = [2] * len(intervals)
result = 0
while intervals:
(start, _), cnt = intervals.pop(), cnts.pop()
for s in xrange(start, start+cnt):
for i in xrange(len(intervals)):
if cnts[i] and s <= intervals[i][1]:
cnts[i] -= 1
result += cnt
return result
| mit | Python | |
208fed6d1e162dd0fcfa10c2b79d0d35ea813478 | Create intermediate-171.py | MaximeKjaer/dailyprogrammer-challenges | Challenge-171/Intermediate/intermediate-171.py | Challenge-171/Intermediate/intermediate-171.py | #Challenge 171 Intermediate
hexvalue = 'FF 81 BD A5 A5 BD 81 FF'.split(' ')
binary = [bin(int(line, 16))[2:].zfill(8) for line in hexvalue] #Convert it to a list of binary lines
image = [pixel.replace('1', '*').replace('0', ' ') for pixel in binary] #Convert it to a list of lines
print 'ORIGINAL IMAGE'
print '\n'.join(image)
def zoom(image, factor):
if factor%1==0 and factor>=1:
return '\n'.join([''.join([str(pixel)*factor for pixel in line]) for line in image for i in range(factor)])
else:
a = int(1/factor)
return '\n'.join([line[::a] for line in image[::a]])
def invert(image):
return '\n'.join([line.replace('*', '#').replace(' ', '*').replace('#', ' ') for line in image])
def rotate_clockwise(image):
image = [list(line) for line in image] #We create a pixel matrix
a = len(image)
new = [[[] for _ in range(a)] for _ in range(a)]
for x in range(a):
for y in range(a):
new[y][a-1-x] = image[x][y]
return '\n'.join([''.join(line) for line in new])
def rotate_counter_clockwise(image):
    """Rotate a square list-of-rows image 90 degrees counter-clockwise."""
    grid = [list(row) for row in image]
    size = len(grid)
    # rotated[r][c] takes its pixel from row c, column (size-1-r).
    rotated = [[grid[c][size - 1 - r] for c in range(size)]
               for r in range(size)]
    return '\n'.join(''.join(row) for row in rotated)
def prepare_for_next(image):
    """Turn a rendered (newline-joined) image string back into a row list."""
    rows = image.split('\n')
    return rows
#Now to the actual Challenge:
# Pipeline: zoom x2 -> rotate clockwise -> zoom x2 -> invert -> zoom out x0.5,
# re-splitting the rendered string into rows between each step.
zoomed = zoom(image, 2)
image = prepare_for_next(zoomed)
rotated = rotate_clockwise(image)
image = prepare_for_next(rotated)
zoomed = zoom(image,2)
image = prepare_for_next(zoomed)
inverted = invert(image)
print inverted
image = prepare_for_next(inverted)
zoomed_out = zoom(image, 0.5)
print zoomed_out
| mit | Python | |
752b5b43aa807e5431615219d40eafd38cacadeb | Increase length of report name on Report model | knowledge4life/django-onmydesk,alissonperez/django-onmydesk,alissonperez/django-onmydesk,knowledge4life/django-onmydesk,knowledge4life/django-onmydesk,alissonperez/django-onmydesk | onmydesk/models.py | onmydesk/models.py | """
Required models to handle and store generated reports.
"""
from django.db import models
from django.conf import settings
from onmydesk.utils import my_import
ONMYDESK_FILE_HANDLER = getattr(settings, 'ONMYDESK_FILE_HANDLER', None)
def output_file_handler(filepath):
    """Run a report output path through the configured handler, if any.

    Looks up the dotted path in ``settings.ONMYDESK_FILE_HANDLER``; when it
    is set, the referenced callable receives the filepath and returns a new
    filepath (or URL, e.g. after moving the file to other storage) to be
    stored on the report register.  Otherwise the path is returned as-is.

    :param str filepath: File path to output generated by report.
    :returns: File path to output (processed or not by an external handler)
    :rtype: str
    """
    handler_path = ONMYDESK_FILE_HANDLER
    if not handler_path:
        return filepath
    return my_import(handler_path)(filepath)
class Report(models.Model):
    """Report model to store generated reports"""
    # Dotted import path of the report class to run (resolved via my_import).
    report = models.CharField(max_length=255)
    # Semicolon-separated output paths/URLs; use `results_as_list` to read.
    results = models.CharField(max_length=255, null=True, blank=True)
    insert_date = models.DateTimeField('Creation Date', auto_now_add=True)
    update_date = models.DateTimeField('Update Date', auto_now=True)
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True)
    def process(self, report_params=None):
        """Process this report. After processing the outputs will be stored at `results`.
        To access output results is recommended to use :func:`results_as_list`.
        :param dict report_params: Dictionary with params to be used for process report.
        """
        report_class = my_import(self.report)
        report = report_class(params=report_params)
        report.process()
        results = []
        # Each output is passed through the (optional) configured handler,
        # which may move the file and return a new path/URL.
        for filepath in report.output_filepaths:
            results.append(output_file_handler(filepath))
        # NOTE(review): the instance is not saved here -- caller must save.
        self.results = ';'.join(results)
    @property
    def results_as_list(self):
        """Returns a list of output results stored in this model
        :returns: List of results
        :rtype: list"""
        if not self.results:
            return []
        return self.results.split(';')
| """
Required models to handle and store generated reports.
"""
from django.db import models
from django.conf import settings
from onmydesk.utils import my_import
ONMYDESK_FILE_HANDLER = getattr(settings, 'ONMYDESK_FILE_HANDLER', None)
def output_file_handler(filepath):
"""
Returns the output filepath (handled or not by an external function).
This function tries to find a function handler in `settings.ONMYDESK_FILE_HANDLER`. It
must receive a filepath and returns a new filepath (or url, e.g.) to be stored in the
report register. It's useful to handle the report results (move to other dirs ou to cloud).
:param str filepath: File path to output generated by report.
:returns: File path to output (processed or not by a external handler)
:rtype: str
"""
function_handler = ONMYDESK_FILE_HANDLER
if not function_handler:
return filepath
handler = my_import(function_handler)
return handler(filepath)
class Report(models.Model):
"""Report model to store generated reports"""
report = models.CharField(max_length=30)
results = models.CharField(max_length=255, null=True, blank=True)
insert_date = models.DateTimeField('Creation Date', auto_now_add=True)
update_date = models.DateTimeField('Update Date', auto_now=True)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True)
def process(self, report_params=None):
"""Process this report. After processing the outputs will be stored at `results`.
To access output results is recommended to use :func:`results_as_list`.
:param dict report_params: Dictionary with params to be used for process report.
"""
report_class = my_import(self.report)
report = report_class(params=report_params)
report.process()
results = []
for filepath in report.output_filepaths:
results.append(output_file_handler(filepath))
self.results = ';'.join(results)
@property
def results_as_list(self):
"""Returns a list of output results stored in this model
:returns: List of results
:rtype: list"""
if not self.results:
return []
return self.results.split(';')
| mit | Python |
bdf5cfb2a7b716d897dabd62e591caad8144a029 | Add election funding parsing script | kansanmuisti/kamu,kansanmuisti/kamu,kansanmuisti/kamu,kansanmuisti/kamu,kansanmuisti/kamu | utils/populate-funding.py | utils/populate-funding.py | #!/usr/bin/python
import os
import sys
import csv
from optparse import OptionParser
from django.core.management import setup_environ
my_path = os.path.abspath(os.path.dirname(__file__))
app_path = os.path.normpath(my_path + '/..')
app_base = app_path + '/'
# We need a path like '<app_path>/utils:<app_path>:<app_path>/..'
# The first one is inserted by python itself. The order is important to
# guarantee that we'll import the proper app specific module in case there
# is also a generic (non-app-specific) module with the same name later in
# the path.
sys.path.insert(1, app_path)
sys.path.insert(2, os.path.normpath(app_path + '/..'))
from kamu import settings
setup_environ(settings)
from django.db import connection, transaction
from django import db
from votes.models import Member, TermMember, Term, MemberStats
parser = OptionParser()
parser.add_option('--input', action='store', type='string', dest='input',
help='input file')
(opts, args) = parser.parse_args()
if not opts.input:
exit(1)
MEMBER_NAME_TRANSFORMS = {
'Korhonen Timo': 'Korhonen Timo V.',
'Ollila Heikki': 'Ollila Heikki A.',
'Saarela Tanja': 'Karpela Tanja',
'Kumpula Miapetra': 'Kumpula-Natri Miapetra',
'Forsius-Harkimo Merikukka': 'Forsius Merikukka',
}
TERM="2007-2010"
term = Term.objects.get(name=TERM)
f = open(opts.input, 'r')
reader = csv.reader(f, delimiter=',', quotechar='"')
# Each row: last name (col 0), first name (col 1), ..., election budget
# (col 4, with thousands separators that are stripped before storing).
for row in reader:
    first_name = row[1].strip()
    last_name = row[0].strip()
    budget = row[4].strip().replace(',', '')
    name = "%s %s" % (last_name, first_name)
    # Normalize names that differ between the funding data and the DB.
    if name in MEMBER_NAME_TRANSFORMS:
        name = MEMBER_NAME_TRANSFORMS[name]
    print "%-20s %-20s %10s" % (first_name, last_name, budget)
    # Silently skip people who are not members during this term.
    try:
        member = Member.objects.get(name=name)
        tm = TermMember.objects.get(member=member, term=term)
    except Member.DoesNotExist:
        continue
    except TermMember.DoesNotExist:
        continue
    ms = MemberStats.objects.get(begin=term.begin, end=term.end, member=member)
    # Store the parsed budget on both the per-term and the stats records.
    tm.election_budget = budget
    tm.save()
    ms.election_budget = budget
    ms.save()
f.close()
| agpl-3.0 | Python | |
30b53c525b2319cc664d26d083c84bba1b63ff7c | add unit test for s3 cache | camptocamp/mapproxy,vrsource/mapproxy,mapproxy/mapproxy,olt/mapproxy,olt/mapproxy,mapproxy/mapproxy,vrsource/mapproxy,camptocamp/mapproxy,drnextgis/mapproxy,drnextgis/mapproxy | mapproxy/test/unit/test_cache_s3.py | mapproxy/test/unit/test_cache_s3.py | # This file is part of the MapProxy project.
# Copyright (C) 2011 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import random
from nose.plugins.skip import SkipTest
from mapproxy.cache.s3 import S3Cache
from mapproxy.test.unit.test_cache_tile import TileCacheTestBase
class TestS3Cache(TileCacheTestBase):
    # S3-backed caches always load tile metadata, so the shared base-class
    # assertions must account for it.
    always_loads_metadata = True
    def setup(self):
        # Only run when a target bucket is configured in the environment.
        if not os.environ.get('MAPPROXY_TEST_S3'):
            raise SkipTest()
        bucket_name = os.environ['MAPPROXY_TEST_S3']
        # Random per-run prefix so concurrent test runs do not collide.
        dir_name = 'mapproxy/test_%d' % random.randint(0, 100000)
        TileCacheTestBase.setup(self)
        self.cache = S3Cache(dir_name, file_ext='png', directory_layout='tms',
            lock_timeout=10, bucket_name=bucket_name, profile_name=None)
    def teardown(self):
        TileCacheTestBase.teardown(self)
| apache-2.0 | Python | |
a332a057292e701e197b5ac2250e608ef953d631 | Add example config | diath/pyfsw,diath/pyfsw,diath/pyfsw | pyfsw/config.example.py | pyfsw/config.example.py | # Database URI Scheme (Refer to the SQLAlchemy documentation for variations)
DB_URI = ''
# Secret Key
SECRET_KEY = 'pyfsw'
# Network Host
NET_HOST = '127.0.0.1'
# Network Port
NET_PORT = 5000
# Debug Mode
DEBUG = False
# Debug Profiler
DEBUG_PROFILER = False
# Date Format
DATE_FORMAT = '%m/%d/%y %I:%M %p'
# Cache Time (Seconds)
CACHE_TIME = 0
# Guild Logo Upload Path
UPLOAD_PATH = ''
# Font Path (Captcha)
FONT_PATH = ''
# Server Name
SERVER_NAME = ''
# Town Names
TOWNS = {
0: 'All',
1: 'Some Town'
}
# House Price (per SQM)
HOUSE_PRICE = 1000
# Vocation Names
VOCATIONS = {
0: 'No Vocation',
1: 'Sorcerer',
2: 'Druid',
3: 'Paladin',
4: 'Knight',
5: 'Master Sorcerer',
6: 'Elder Druid',
7: 'Royal Paladin',
8: 'Elite Knight'
}
# Gender Names
GENDERS = {
0: 'Female',
1: 'Male'
}
NEW_CHARACTER = {
'genders': [0, 1],
'vocations': [5, 6, 7, 8],
'towns': [1, 2],
'outfit': [0, 0, 0, 0]
}
# Quests List
QUESTS = [
{'name': 'Example Quest', 'key': 12101, 'value': 1},
]
# Achievements List
ACHIEVEMENTS = [
{'name': 'Example Achievement', 'key': 12101, 'value': 1}
]
| mit | Python | |
9bed52b93061fea7381492ffe0ce55c6929eab78 | Add tests.py to app skeleton. | lsgunth/rapidsms,peterayeni/rapidsms,dimagi/rapidsms,lsgunth/rapidsms,dimagi/rapidsms,catalpainternational/rapidsms,catalpainternational/rapidsms,eHealthAfrica/rapidsms,rapidsms/rapidsms-core-dev,ken-muturi/rapidsms,ehealthafrica-ci/rapidsms,eHealthAfrica/rapidsms,peterayeni/rapidsms,caktus/rapidsms,rapidsms/rapidsms-core-dev,eHealthAfrica/rapidsms,lsgunth/rapidsms,unicefuganda/edtrac,peterayeni/rapidsms,dimagi/rapidsms-core-dev,peterayeni/rapidsms,ehealthafrica-ci/rapidsms,catalpainternational/rapidsms,ehealthafrica-ci/rapidsms,caktus/rapidsms,catalpainternational/rapidsms,lsgunth/rapidsms,dimagi/rapidsms-core-dev,unicefuganda/edtrac,unicefuganda/edtrac,ken-muturi/rapidsms,ken-muturi/rapidsms,caktus/rapidsms | lib/rapidsms/skeleton/app/tests.py | lib/rapidsms/skeleton/app/tests.py | from rapidsms.tests.scripted import TestScript
from app import App
class TestApp (TestScript):
    """Scripted-conversation test harness for this RapidSMS app."""
    apps = (App,)
    # define your test scripts here.
    # e.g.:
    #
    # testRegister = """
    #   8005551212 > register as someuser
    #   8005551212 < Registered new user 'someuser' for 8005551212!
    #   8005551212 > tell anotheruser what's up??
    #   8005550000 < someuser said "what's up??"
    # """
    #
    # You can also do normal unittest.TestCase methods:
    #
    # def testMyModel (self):
    #   self.assertEquals(...)
| bsd-3-clause | Python | |
4c78124a434d4f953d5811ee2708eaf051bd591e | Create setup_data_libraries.py | bgruening/galaxy-rna-workbench,mwolfien/galaxy-rna-workbench,mwolfien/galaxy-rna-workbench,bgruening/galaxy-rna-workbench,mwolfien/galaxy-rna-workbench,bgruening/galaxy-rna-workbench | setup_data_libraries.py | setup_data_libraries.py | #!/usr/bin/env python
import argparse
import logging as log
import sys
import time
import yaml
from bioblend import galaxy
def setup_data_libraries(gi, data):
    """
    Load files into a Galaxy data library.
    By default all test-data tools from all installed tools
    will be linked into a data library.

    :param gi: authenticated bioblend GalaxyInstance
    :param data: open file/stream containing the YAML library description
    """
    log.info("Importing data libraries.")
    jc = galaxy.jobs.JobsClient(gi)
    folders = dict()
    # NOTE(review): yaml.load without an explicit Loader can execute
    # arbitrary YAML tags -- prefer yaml.safe_load unless input is trusted.
    libraries = yaml.load(data)
    for lib in libraries['libraries']:
        folders[lib['name']] = lib['files']
    if folders:
        log.info("Create 'Test Data' library.")
        lib = gi.libraries.create_library('Training Data', 'Data pulled from online archives.')
        lib_id = lib['id']
        # One library folder per named group, each filled from its URLs.
        for fname, urls in folders.items():
            log.info("Creating folder: %s" % fname)
            folder = gi.libraries.create_folder(lib_id, fname)
            for url in urls:
                gi.libraries.upload_file_from_url(
                    lib_id,
                    url['url'],
                    folder_id=folder[0]['id'],
                    file_type=url['file_type']
                )
    # Poll until every Galaxy job has reached the 'ok' state.
    no_break = True
    while True:
        no_break = False
        for job in jc.get_jobs():
            if job['state'] != 'ok':
                no_break = True
        if not no_break:
            break
        time.sleep(3)
    # Grace period so Galaxy finishes post-processing before returning.
    time.sleep(20)
    log.info("Finished importing test data.")
def main():
    """Parse CLI arguments, connect to Galaxy and import the test data."""
    parser = argparse.ArgumentParser(
        description='Populate the Galaxy data library with test data.'
    )
    parser.add_argument("-v", "--verbose", help="Increase output verbosity.",
                        action="store_true")
    parser.add_argument('-i', '--infile', type=argparse.FileType('r'))
    parser.add_argument("-g", "--galaxy",
                        help="Target Galaxy instance URL/IP address.")
    parser.add_argument("-u", "--user",
                        help="Galaxy user name")
    parser.add_argument("-p", "--password",
                        help="Password for the Galaxy user")
    parser.add_argument("-a", "--api_key",
                        dest="api_key",
                        help="Galaxy admin user API key (required if not defined in the tools list file)")
    args = parser.parse_args()
    # Prefer username/password login; fall back to an API key.
    if args.user and args.password:
        gi = galaxy.GalaxyInstance(url=args.galaxy, email=args.user, password=args.password)
    elif args.api_key:
        gi = galaxy.GalaxyInstance(url=args.galaxy, key=args.api_key)
    else:
        sys.exit('Please specify either a valid Galaxy username/password or an API key.')
    if args.verbose:
        log.basicConfig(level=log.DEBUG)
    setup_data_libraries(gi, args.infile)
if __name__ == '__main__':
    main()
| mit | Python | |
d78872da09bc67435a2662cce0b253ab149b2bad | Create 03.py | ezralalonde/cloaked-octo-sansa | 02.5/03.py | 02.5/03.py | # By Websten from forums
#
# Given your birthday and the current date, calculate your age in days.
# Compensate for leap days.
# Assume that the birthday and current date are correct dates (and no time travel).
# Simply put, if you were born 1 Jan 2012 and today's date is 2 Jan 2012
# you are 1 day old.
#
# Hint
# A whole year is 365 days, 366 if a leap year.
def nextDay(year, month, day):
    """Simple version: assume every month has 30 days"""
    # Roll the month (and the year after December) once day 30 is reached;
    # otherwise just advance within the month.
    if day >= 30:
        if month == 12:
            return year + 1, 1, 1
        return year, month + 1, 1
    return year, month, day + 1
def dateIsAfter(year1, month1, day1, year2, month2, day2):
    """Returns True if year1-month1-day1 is after year2-month2-day2. Otherwise, returns False."""
    # Lexicographic tuple comparison is exactly the cascaded
    # year/month/day test.
    return (year1, month1, day1) > (year2, month2, day2)
def daysBetweenDates(year1, month1, day1, year2, month2, day2):
    """Returns the number of days between year1/month1/day1
    and year2/month2/day2. Assumes inputs are valid dates
    in Gregorian calendar."""
    # Defensive check: the start date must not come after the end date.
    assert not dateIsAfter(year1, month1, day1, year2, month2, day2)
    count = 0
    current = (year1, month1, day1)
    # Step one day at a time until we reach the end date.
    while dateIsAfter(year2, month2, day2, *current):
        current = nextDay(*current)
        count += 1
    return count
def test():
    # Each case: (args for daysBetweenDates, expected result); the string
    # "AssertionError" marks inputs that must trip the defensive assert.
    test_cases = [((2012,9,30,2012,10,30),30),
                  ((2012,1,1,2013,1,1),360),
                  ((2012,9,1,2012,9,4),3),
                  ((2013,1,1,1999,12,31), "AssertionError")]
    for (args, answer) in test_cases:
        try:
            result = daysBetweenDates(*args)
            if result != answer:
                print "Test with data:", args, "failed"
            else:
                print "Test case passed!"
        except AssertionError:
            # The assert fired: that is correct only when it was expected.
            if answer == "AssertionError":
                print "Nice job! Test case {0} correctly raises AssertionError!\n".format(args)
            else:
                print "Check your work! Test case {0} should not raise AssertionError!\n".format(args)
test()
| bsd-2-clause | Python | |
d2b3996edc1af3f7f491354a762b8bd34c8345a1 | Create remove_string_spaces.py | Kunalpod/codewars,Kunalpod/codewars | remove_string_spaces.py | remove_string_spaces.py | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Remove String Spaces
#Problem level: 8 kyu
def no_space(x):
    """Return *x* with every whitespace character removed."""
    return ''.join(ch for ch in x if not ch.isspace())
| mit | Python | |
8ea44bc5daa099ccc2e48c606f38a424235b9f3d | Create a.py | y-sira/atcoder,y-sira/atcoder | abc001/a.py | abc001/a.py | h1 = int(input())
h2 = int(input())
print(h1 - h2)
| mit | Python | |
947570fcc24458c4d7d6e44db0849abdf8055ccb | Add script for generating manifests | arcticfoxnv/coldbrew,arcticfoxnv/coldbrew,arcticfoxnv/coldbrew | packer/manifest.py | packer/manifest.py | #!/usr/bin/env python
import argparse
import hashlib
import json
class Manifest(object):
    # In-memory representation of a Vagrant box manifest: a name,
    # a description and a list of versions with per-provider artifacts.
    def __init__(self, name=None, description=None, versions=None):
        self.name = name
        self.description = description
        self.versions = versions
    def load(self, filename):
        # Populate this instance from an existing JSON manifest file.
        # NOTE(review): opened in binary mode; json.load over bytes is
        # Python-version sensitive -- presumably targets Python 2.
        with open(filename, 'rb') as f:
            data = json.load(f)
        self.name = data['name']
        self.description = data['description']
        self.versions = []
        for i in data['versions']:
            providers = [ManifestProvider(**j) for j in i['providers']]
            self.versions.append(ManifestVersion(i['version'], providers))
    def save(self, filename):
        # Serialize back to JSON.  NOTE(review): 'wb' + json.dump writes
        # str and fails on Python 3 -- confirm the intended runtime.
        data = {
            'name': self.name,
            'description': self.description,
            'versions': [x.to_dict() for x in self.versions],
        }
        with open(filename, 'wb') as f:
            json.dump(data, f, indent=2)
    def add_version(self, version, provider, url, checksum, checksum_type='sha256'):
        # Add (or update in place) the artifact for `provider` under `version`.
        if self.versions is None:
            self.versions = []
        prov = ManifestProvider(
            name=provider,
            url=url,
            checksum=checksum,
            checksum_type=checksum_type,
        )
        for v in self.versions:
            if v.version == version:
                for p in v.providers:
                    if p.name == provider:
                        # Existing entry: refresh url/checksum only.
                        # NOTE(review): checksum_type is not updated here.
                        p.url = url
                        p.checksum = checksum
                        return
                # Version exists but this provider does not: append it.
                v.providers.append(prov)
                return
        # Brand-new version entry.
        ver = ManifestVersion(version=version, providers=[prov])
        self.versions.append(ver)
        return
class ManifestVersion(object):
    """A single box version together with its per-provider artifacts."""
    def __init__(self, version, providers):
        self.version = version
        self.providers = providers
    def to_dict(self):
        """Serialize to the JSON manifest layout."""
        provider_dicts = [provider.to_dict() for provider in self.providers]
        return dict(version=self.version, providers=provider_dicts)
class ManifestProvider(object):
    """Download location and checksum of a box for one provider."""
    def __init__(self, name, url, checksum_type, checksum):
        self.name = name
        self.url = url
        self.checksum_type = checksum_type
        self.checksum = checksum
    def to_dict(self):
        """Serialize to the JSON manifest layout."""
        return dict(
            name=self.name,
            url=self.url,
            checksum_type=self.checksum_type,
            checksum=self.checksum,
        )
def get_box_file_checksum(filename):
    """Return the hex SHA-256 digest of *filename*, read in 4 KiB chunks."""
    digest = hashlib.sha256()
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(4096), b''):
            digest.update(chunk)
    return digest.hexdigest()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='A tool for creating and updating Vagrant box manifests')
    parser.add_argument('--update', action='store_true', help='Update the manifest instead of creating a new one')
    parser.add_argument('--description', action='store', required=True, help='Box description')
    parser.add_argument('filename', type=str, help='Name of manifest file')
    parser.add_argument('box_filename', type=str, help='Box filename')
    parser.add_argument('name', type=str, help='Box name')
    parser.add_argument('version', type=str, help='Box version')
    parser.add_argument('provider', type=str, help='Box provider type')
    parser.add_argument('url', type=str, help='URL where box will be available')
    args = parser.parse_args()
    manifest = Manifest(
        name=args.name,
        description=args.description,
    )
    # --update: start from the existing manifest instead of a fresh one.
    if args.update:
        manifest.load(args.filename)
    # Hash the box file so clients can verify the download.
    checksum = get_box_file_checksum(args.box_filename)
    manifest.add_version(
        version=args.version,
        provider=args.provider,
        url=args.url,
        checksum=checksum,
        checksum_type='sha256',
    )
    manifest.save(args.filename)
| mit | Python | |
730b11a45696b4d4b8b0e56c0028ec6eeca7da4f | Create a.py | y-sira/atcoder,y-sira/atcoder | agc017/a.py | agc017/a.py | import math
def comb(n, r):
return math.factorial(n) / math.factorial(r) / math.factorial(n - r)
def main():
n, p = map(int, input().split())
a = tuple(map(lambda x: int(x) % 2, input().split()))
if n == 1 and a[0] % 2 != p:
print(0)
return 0
t = len(tuple(filter(lambda x: x == 1, a)))
f = n - t
f_comb = 0
for j in range(f + 1):
f_comb += comb(f, j)
t_comb = 0
if p == 0:
for i in range(t + 1):
if i % 2 == 0:
t_comb += comb(t, i)
else:
for i in range(t + 1):
if i % 2 == 1:
t_comb += comb(t, i)
print(int(t_comb * f_comb))
return 0
if __name__ == '__main__':
main()
| mit | Python | |
15b1779475c7744a85e948c419de34be038fba94 | Add lc0314_binary_tree_vertical_order_traversal.py | bowen0701/algorithms_data_structures | lc0314_binary_tree_vertical_order_traversal.py | lc0314_binary_tree_vertical_order_traversal.py | """Leetcode 314. Binary Tree Vertical Order Traversal
Medium
URL: https://leetcode.com/problems/binary-tree-vertical-order-traversal/
Given a binary tree, return the vertical order traversal of its nodes' values.
(ie, from top to bottom, column by column).
If two nodes are in the same row and column, the order should be from left to
right.
Examples 1:
Input: [3,9,20,null,null,15,7]
3
/\
/ \
9 20
/\
/ \
15 7
Output:
[
[9],
[3,15],
[20],
[7]
]
Examples 2:
Input: [3,9,8,4,0,1,7]
3
/\
/ \
9 8
/\ /\
/ \/ \
4 01 7
Output:
[
[4],
[9],
[3,0,1],
[8],
[7]
]
Examples 3:
Input: [3,9,8,4,0,1,7,null,null,null,2,5] (0's right child is 2 and 1's left
child is 5)
3
/\
/ \
9 8
/\ /\
/ \/ \
4 01 7
/\
/ \
5 2
Output:
[
[4],
[9,5],
[3,0,1],
[8,2],
[7]
]
"""
# Definition for a binary tree node.
class TreeNode(object):
    """Plain binary-tree node: a value plus optional left/right children."""
    def __init__(self, val):
        self.val = val
        self.left = self.right = None
class Solution(object):
    def verticalOrder(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[int]]

        The original was an empty stub.  BFS while tracking each node's
        column (root = 0, left child = col - 1, right child = col + 1).
        BFS order guarantees top-to-bottom ordering and, within a row,
        left-to-right ordering inside every column bucket, as required.
        """
        if root is None:
            return []
        from collections import defaultdict, deque
        columns = defaultdict(list)
        queue = deque([(root, 0)])
        while queue:
            node, col = queue.popleft()
            columns[col].append(node.val)
            if node.left is not None:
                queue.append((node.left, col - 1))
            if node.right is not None:
                queue.append((node.right, col + 1))
        # Emit buckets left-to-right by column index.
        return [columns[col] for col in sorted(columns)]
def main():
    # Placeholder entry point -- no demo wired up yet.
    pass
if __name__ == '__main__':
    main()
| bsd-2-clause | Python | |
934e907180645e3dc618ff5c75a4982656310673 | Add the arrayfns compatibility library -- not finished. | moreati/numpy,bmorris3/numpy,rajathkumarmp/numpy,mwiebe/numpy,nbeaver/numpy,gfyoung/numpy,tynn/numpy,ESSS/numpy,astrofrog/numpy,jonathanunderwood/numpy,pdebuyl/numpy,cowlicks/numpy,moreati/numpy,ahaldane/numpy,mattip/numpy,cjermain/numpy,Srisai85/numpy,Anwesh43/numpy,gfyoung/numpy,charris/numpy,seberg/numpy,ogrisel/numpy,BabeNovelty/numpy,andsor/numpy,kirillzhuravlev/numpy,madphysicist/numpy,BabeNovelty/numpy,pbrod/numpy,bmorris3/numpy,sigma-random/numpy,immerrr/numpy,matthew-brett/numpy,matthew-brett/numpy,jorisvandenbossche/numpy,MSeifert04/numpy,ChristopherHogan/numpy,astrofrog/numpy,rudimeier/numpy,kirillzhuravlev/numpy,madphysicist/numpy,sonnyhu/numpy,NextThought/pypy-numpy,dato-code/numpy,gmcastil/numpy,skwbc/numpy,dwf/numpy,numpy/numpy-refactor,pelson/numpy,skwbc/numpy,seberg/numpy,dwillmer/numpy,bertrand-l/numpy,shoyer/numpy,tynn/numpy,utke1/numpy,behzadnouri/numpy,KaelChen/numpy,mhvk/numpy,SiccarPoint/numpy,maniteja123/numpy,cjermain/numpy,leifdenby/numpy,Linkid/numpy,cjermain/numpy,groutr/numpy,dch312/numpy,grlee77/numpy,groutr/numpy,rgommers/numpy,GrimDerp/numpy,KaelChen/numpy,charris/numpy,rhythmsosad/numpy,CMartelLML/numpy,b-carter/numpy,MaPePeR/numpy,rajathkumarmp/numpy,seberg/numpy,SiccarPoint/numpy,b-carter/numpy,ESSS/numpy,matthew-brett/numpy,mhvk/numpy,WillieMaddox/numpy,argriffing/numpy,rhythmsosad/numpy,Linkid/numpy,MaPePeR/numpy,grlee77/numpy,njase/numpy,Dapid/numpy,rgommers/numpy,sigma-random/numpy,dch312/numpy,BMJHayward/numpy,astrofrog/numpy,behzadnouri/numpy,musically-ut/numpy,KaelChen/numpy,cowlicks/numpy,pdebuyl/numpy,charris/numpy,BabeNovelty/numpy,ahaldane/numpy,pdebuyl/numpy,skwbc/numpy,gmcastil/numpy,hainm/numpy,anntzer/numpy,SunghanKim/numpy,WillieMaddox/numpy,skymanaditya1/numpy,mindw/numpy,skymanaditya1/numpy,Yusa95/numpy,pdebuyl/numpy,endolith/numpy,dch312/numpy,WarrenWeckesser/numpy,hainm/numpy,larsmans/numpy,tacaswell/numpy,tdsmit
h/numpy,KaelChen/numpy,numpy/numpy,simongibbons/numpy,pizzathief/numpy,maniteja123/numpy,SiccarPoint/numpy,pelson/numpy,simongibbons/numpy,ViralLeadership/numpy,mortada/numpy,shoyer/numpy,grlee77/numpy,ogrisel/numpy,SiccarPoint/numpy,dimasad/numpy,jankoslavic/numpy,andsor/numpy,shoyer/numpy,mingwpy/numpy,ChristopherHogan/numpy,rhythmsosad/numpy,mortada/numpy,dimasad/numpy,mindw/numpy,charris/numpy,MSeifert04/numpy,Eric89GXL/numpy,CMartelLML/numpy,pyparallel/numpy,Eric89GXL/numpy,endolith/numpy,gfyoung/numpy,embray/numpy,jschueller/numpy,dwf/numpy,bmorris3/numpy,pbrod/numpy,sigma-random/numpy,dwillmer/numpy,GaZ3ll3/numpy,AustereCuriosity/numpy,b-carter/numpy,ekalosak/numpy,pbrod/numpy,GaZ3ll3/numpy,yiakwy/numpy,pizzathief/numpy,naritta/numpy,MichaelAquilina/numpy,ekalosak/numpy,bringingheavendown/numpy,nbeaver/numpy,felipebetancur/numpy,brandon-rhodes/numpy,jschueller/numpy,behzadnouri/numpy,stefanv/numpy,argriffing/numpy,githubmlai/numpy,sonnyhu/numpy,drasmuss/numpy,BMJHayward/numpy,rmcgibbo/numpy,bringingheavendown/numpy,mathdd/numpy,jschueller/numpy,sinhrks/numpy,mattip/numpy,tynn/numpy,pyparallel/numpy,brandon-rhodes/numpy,stefanv/numpy,Linkid/numpy,anntzer/numpy,ajdawson/numpy,joferkington/numpy,immerrr/numpy,embray/numpy,Srisai85/numpy,ogrisel/numpy,has2k1/numpy,mhvk/numpy,numpy/numpy,GaZ3ll3/numpy,jakirkham/numpy,rhythmsosad/numpy,cowlicks/numpy,grlee77/numpy,jakirkham/numpy,rherault-insa/numpy,GrimDerp/numpy,MaPePeR/numpy,AustereCuriosity/numpy,moreati/numpy,embray/numpy,Anwesh43/numpy,chiffa/numpy,numpy/numpy-refactor,pelson/numpy,ajdawson/numpy,ChanderG/numpy,sinhrks/numpy,NextThought/pypy-numpy,solarjoe/numpy,jorisvandenbossche/numpy,larsmans/numpy,empeeu/numpy,simongibbons/numpy,dwf/numpy,kiwifb/numpy,nguyentu1602/numpy,Dapid/numpy,rudimeier/numpy,tdsmith/numpy,Linkid/numpy,jonathanunderwood/numpy,GrimDerp/numpy,jorisvandenbossche/numpy,WarrenWeckesser/numpy,stuarteberg/numpy,skymanaditya1/numpy,rgommers/numpy,WarrenWeckesser/numpy,bringingheavendown/nump
y,brandon-rhodes/numpy,stuarteberg/numpy,NextThought/pypy-numpy,trankmichael/numpy,ewmoore/numpy,has2k1/numpy,stuarteberg/numpy,NextThought/pypy-numpy,numpy/numpy,ssanderson/numpy,jakirkham/numpy,drasmuss/numpy,felipebetancur/numpy,jakirkham/numpy,Srisai85/numpy,hainm/numpy,BMJHayward/numpy,mingwpy/numpy,ssanderson/numpy,numpy/numpy-refactor,jankoslavic/numpy,nbeaver/numpy,kirillzhuravlev/numpy,trankmichael/numpy,pizzathief/numpy,musically-ut/numpy,andsor/numpy,numpy/numpy,ChristopherHogan/numpy,ESSS/numpy,ekalosak/numpy,nguyentu1602/numpy,rmcgibbo/numpy,ahaldane/numpy,naritta/numpy,andsor/numpy,ChanderG/numpy,MSeifert04/numpy,githubmlai/numpy,jankoslavic/numpy,mhvk/numpy,ddasilva/numpy,pelson/numpy,ViralLeadership/numpy,dato-code/numpy,chatcannon/numpy,jschueller/numpy,ViralLeadership/numpy,musically-ut/numpy,dimasad/numpy,ContinuumIO/numpy,dwillmer/numpy,rherault-insa/numpy,sinhrks/numpy,jorisvandenbossche/numpy,AustereCuriosity/numpy,dch312/numpy,WarrenWeckesser/numpy,pizzathief/numpy,dato-code/numpy,bmorris3/numpy,simongibbons/numpy,mortada/numpy,ogrisel/numpy,utke1/numpy,Yusa95/numpy,MichaelAquilina/numpy,skymanaditya1/numpy,MSeifert04/numpy,ssanderson/numpy,trankmichael/numpy,tdsmith/numpy,joferkington/numpy,Eric89GXL/numpy,rudimeier/numpy,larsmans/numpy,ChanderG/numpy,sinhrks/numpy,Eric89GXL/numpy,argriffing/numpy,empeeu/numpy,gmcastil/numpy,jonathanunderwood/numpy,abalkin/numpy,numpy/numpy-refactor,dwf/numpy,kiwifb/numpy,simongibbons/numpy,jorisvandenbossche/numpy,GaZ3ll3/numpy,mindw/numpy,hainm/numpy,ddasilva/numpy,Anwesh43/numpy,shoyer/numpy,endolith/numpy,rajathkumarmp/numpy,WarrenWeckesser/numpy,tacaswell/numpy,dwf/numpy,solarjoe/numpy,mathdd/numpy,CMartelLML/numpy,dato-code/numpy,matthew-brett/numpy,numpy/numpy-refactor,nguyentu1602/numpy,cowlicks/numpy,WillieMaddox/numpy,sigma-random/numpy,mattip/numpy,mwiebe/numpy,empeeu/numpy,ewmoore/numpy,jakirkham/numpy,dimasad/numpy,brandon-rhodes/numpy,njase/numpy,embray/numpy,shoyer/numpy,ewmoore/numpy,grlee77/n
umpy,rherault-insa/numpy,endolith/numpy,Srisai85/numpy,chiffa/numpy,bertrand-l/numpy,abalkin/numpy,mortada/numpy,mindw/numpy,ddasilva/numpy,madphysicist/numpy,GrimDerp/numpy,stuarteberg/numpy,Anwesh43/numpy,pizzathief/numpy,yiakwy/numpy,empeeu/numpy,trankmichael/numpy,MichaelAquilina/numpy,mingwpy/numpy,MichaelAquilina/numpy,bertrand-l/numpy,leifdenby/numpy,joferkington/numpy,ahaldane/numpy,has2k1/numpy,githubmlai/numpy,groutr/numpy,tacaswell/numpy,kirillzhuravlev/numpy,Dapid/numpy,cjermain/numpy,SunghanKim/numpy,sonnyhu/numpy,Yusa95/numpy,yiakwy/numpy,chatcannon/numpy,felipebetancur/numpy,ChanderG/numpy,joferkington/numpy,mwiebe/numpy,madphysicist/numpy,chiffa/numpy,drasmuss/numpy,pyparallel/numpy,BMJHayward/numpy,rmcgibbo/numpy,SunghanKim/numpy,rudimeier/numpy,Yusa95/numpy,MSeifert04/numpy,ContinuumIO/numpy,embray/numpy,ewmoore/numpy,ajdawson/numpy,rmcgibbo/numpy,rgommers/numpy,naritta/numpy,naritta/numpy,jankoslavic/numpy,utke1/numpy,MaPePeR/numpy,madphysicist/numpy,anntzer/numpy,rajathkumarmp/numpy,tdsmith/numpy,felipebetancur/numpy,immerrr/numpy,mattip/numpy,chatcannon/numpy,BabeNovelty/numpy,stefanv/numpy,nguyentu1602/numpy,maniteja123/numpy,githubmlai/numpy,abalkin/numpy,stefanv/numpy,ewmoore/numpy,mathdd/numpy,dwillmer/numpy,ekalosak/numpy,ahaldane/numpy,sonnyhu/numpy,yiakwy/numpy,CMartelLML/numpy,solarjoe/numpy,astrofrog/numpy,astrofrog/numpy,mhvk/numpy,matthew-brett/numpy,immerrr/numpy,pelson/numpy,mathdd/numpy,ContinuumIO/numpy,ogrisel/numpy,pbrod/numpy,njase/numpy,larsmans/numpy,mingwpy/numpy,musically-ut/numpy,has2k1/numpy,kiwifb/numpy,stefanv/numpy,seberg/numpy,leifdenby/numpy,pbrod/numpy,ChristopherHogan/numpy,anntzer/numpy,SunghanKim/numpy,ajdawson/numpy | numpy/oldnumeric/arrayfns.py | numpy/oldnumeric/arrayfns.py | """Backward compatible with arrayfns from Numeric
"""
__all__ = ['array_set', 'construct3', 'digitize', 'error', 'find_mask', 'histogram', 'index_sort',
'interp', 'nz', 'reverse', 'span', 'to_corners', 'zmin_zmax']
import numpy as nx
from numpy import asarray
class error(Exception):
pass
def array_set(vals1, indices, vals2):
indices = asarray(indices)
if indices.ndim != 1:
raise ValueError, "index array must be 1-d"
if not isinstance(vals1, ndarray):
raise TypeError, "vals1 must be an ndarray"
vals1 = asarray(vals1)
vals2 = asarray(vals2)
if vals1.ndim != vals2.ndim or vals1.ndim < 1:
raise error, "vals1 and vals2 must have same number of dimensions (>=1)"
vals1[indices] = vals2
def construct3(mask, itype):
raise NotImplementedError
from numpy import digitize
def find_mask(fs, node_edges):
raise NotImplementedError
def histogram(lst, weight=None):
raise NotImplementedError
def index_sort(arr):
return asarray(arr).argsort(kind='heap')
def interp(y, x, z, typ=None):
"""y(z) interpolated by treating y(x) as piecewise function
"""
res = numpy.interp(z, x, y)
if typ is None or typ == 'd':
return res
if typ == 'f':
return res.astype('f')
raise error, "incompatible typecode"
def nz(x):
x = asarray(x,dtype=nx.ubyte)
if x.ndim != 1:
raise TypeError, "intput must have 1 dimension."
indxs = nx.flatnonzero(x != 0)
return indxs[-1].item()+1
def reverse(x, n):
x = asarray(x,dtype='d')
if x.ndim != 2:
raise ValueError, "input must be 2-d"
y = nx.empty_like(x)
if n == 0:
y[...] = x[::-1,:]
elif n == 1:
y[...] = x[:,::-1]
return y
def span(lo, hi, num, d2=0):
x = linspace(lo, hi, num)
if d2 <= 0
return x
else:
ret = empty((d2,num),x.dtype)
ret[...] = x
return ret
def to_corners(arr, nv, nvsum):
raise NotImplementedError
def zmin_zmax(z, ireg):
raise NotImplementedError
| bsd-3-clause | Python | |
d5daa2376fadae0d6715b606a0c355b572efdd0c | Add Python benchmark | stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib | lib/node_modules/@stdlib/math/base/dist/beta/kurtosis/benchmark/python/benchmark.scipy.py | lib/node_modules/@stdlib/math/base/dist/beta/kurtosis/benchmark/python/benchmark.scipy.py | #!/usr/bin/env python
"""Benchmark scipy.stats.beta.stats."""
import timeit
name = "beta:kurtosis"
repeats = 3
iterations = 1000
def print_version():
    """Print the TAP version."""
    # A TAP stream begins with its protocol version declaration.
    print("TAP version 13")
def print_summary(total, passing):
    """Write the closing TAP plan and pass/total summary.

    # Arguments

    * `total`: total number of tests
    * `passing`: number of passing tests
    """
    summary_lines = (
        "#",
        "1.." + str(total),       # TAP plan
        "# total " + str(total),
        "# pass " + str(passing),
        "#",
        "# ok",
    )
    for line in summary_lines:
        print(line)
def print_results(elapsed):
    """Write one TAP YAML-style result block for a single benchmark run.

    # Arguments

    * `elapsed`: elapsed time (in seconds)

    # Examples

    ``` python
    python> print_results(0.131009101868)
    ```
    """
    # ``iterations`` is the module-level iteration count.
    throughput = iterations / elapsed
    print("  ---")
    for label, value in (
        ("iterations", iterations),
        ("elapsed", elapsed),
        ("rate", throughput),
    ):
        print("  " + label + ": " + str(value))
    print("  ...")
def benchmark():
    """Run the scipy beta-kurtosis benchmark and print TAP-style results."""
    # Setup/statement strings are compiled once by timeit.Timer and reused
    # for every repeat.
    setup = "from scipy.stats import beta; from random import random;"
    stmt = "y = beta.stats(random()*10.0 + 1.0, random()*10.0 + 1.0, moments='k')"
    t = timeit.Timer(stmt, setup=setup)

    print_version()
    # BUG FIX: the original used the Python-2-only ``xrange`` even though
    # the rest of the script uses Python-3-compatible print() calls;
    # ``range`` behaves identically here on both versions.
    for i in range(repeats):
        print("# python::" + name)
        elapsed = t.timeit(number=iterations)
        print_results(elapsed)
        print("ok " + str(i+1) + " benchmark finished")
    print_summary(repeats, repeats)
def main():
    """Run the benchmark."""
    # Thin entry point so the module can be imported without side effects.
    benchmark()
if __name__ == "__main__":
main()
| apache-2.0 | Python | |
03de607d14805779ed9653b65a5bd5cee3525903 | Add the IFTTT campaign success server plugin | securestate/king-phisher-plugins,zeroSteiner/king-phisher-plugins,securestate/king-phisher-plugins,wolfthefallen/king-phisher-plugins,zeroSteiner/king-phisher-plugins,wolfthefallen/king-phisher-plugins | server/ifttt_on_campaign_success.py | server/ifttt_on_campaign_success.py | import collections
import king_phisher.plugins as plugin_opts
import king_phisher.server.plugins as plugins
import king_phisher.server.signals as signals
import requests
class Plugin(plugins.ServerPlugin):
    authors = ['Spencer McIntyre']
    title = 'IFTTT Campaign Success Notification'
    description = """
    A plugin that will publish an event to a specified IFTTT Maker channel when
    a campaign has been deemed 'successful'.
    """
    homepage = 'https://github.com/securestate/king-phisher-plugins'
    options = [
        plugin_opts.OptionString(
            name='api_key',
            description='Maker channel API key'
        ),
        plugin_opts.OptionString(
            name='event_name',
            description='Maker channel Event name'
        )
    ]
    def initialize(self):
        """Register for database inserts on the 'visits' table."""
        signals.db_session_inserted.connect(self.on_kp_db_event, sender='visits')
        return True

    def on_kp_db_event(self, sender, targets, session):
        """For each newly inserted visit, notify once per successful campaign."""
        # BUG FIX: the original read ``collection.deque()`` which raised
        # NameError at runtime -- the imported module is ``collections``.
        campaign_ids = collections.deque()
        for event in targets:
            cid = event.campaign_id
            # de-duplicate: notify at most once per campaign per batch
            if cid in campaign_ids:
                continue
            if not self.check_campaign(session, cid):
                continue
            campaign_ids.append(cid)
            self.send_notification()

    def check_campaign(self, session, cid):
        """Return True exactly when this batch pushed the campaign over the
        success threshold (25% of at least 5 unique targets visited)."""
        # NOTE(review): ``db_manager``, ``db_models`` and ``models`` are
        # referenced but not imported in this file -- confirm the intended
        # imports (they look like king_phisher.server.database modules).
        campaign = db_manager.get_row_by_id(session, db_models.Campaign, cid)
        if campaign.has_expired:
            # the campaign can not be expired
            return False
        unique_targets = session.query(models.Message.target_email)
        unique_targets = unique_targets.filter_by(campaign_id=cid)
        unique_targets = float(unique_targets.distinct().count())
        if unique_targets < 5:
            # the campaign needs at least 5 unique targets
            return False

        success_percentage = 0.25
        unique_visits = session.query(models.Visit.message_id)
        unique_visits = unique_visits.filter_by(campaign_id=cid)
        unique_visits = float(unique_visits.distinct().count())
        if unique_visits / unique_targets < success_percentage:
            # the campaign is not yet classified as successful
            return False
        if (unique_visits - 1) / unique_targets >= success_percentage:
            # the campaign has already been classified as successful
            return False
        return True

    def send_notification(self):
        """POST the configured event to the IFTTT Maker channel webhook."""
        try:
            resp = requests.post("https://maker.ifttt.com/trigger/{0}/with/key/{1}".format(self.config['event_name'], self.config['api_key']))
        except Exception as error:
            self.logger.error('failed to post a notification of a successful campaign (exception)', exc_info=True)
            return
        if not resp.ok:
            self.logger.error('failed to post a notification of a successful campaign (request)')
            return
        self.logger.info('successfully posted notification of a successful campaign')
| bsd-3-clause | Python | |
7e7879eb5c0d547a56a082a9b3a444fea59e9156 | Create revEncry.py | NendoTaka/CodeForReference,NendoTaka/CodeForReference,NendoTaka/CodeForReference | Codingame/Python/Clash/revEncry.py | Codingame/Python/Clash/revEncry.py | import sys
import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
# Atbash-style cipher: each character maps to the letter as far below 'z'
# (122) as it is above 'a' (97), so 'a' <-> 'z', 'b' <-> 'y', etc.
# Output is printed with no separators.  Assumes lowercase a-z input --
# TODO confirm against the puzzle statement.
word = input()

for x in word:
    o = ord(x)
    k = 122-o
    print(chr(97+k),end='')
0d8fe16a3d2c70c24fe9743d8c2d2ac721a1435e | Test case for the previous commit. | apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb | test/functionalities/breakpoint/breakpoint_command/TestBreakpointCommandsFromPython.py | test/functionalities/breakpoint/breakpoint_command/TestBreakpointCommandsFromPython.py | """
Test that you can set breakpoint commands successfully with the Python API's:
"""
import os
import re
import unittest2
import lldb, lldbutil
import sys
from lldbtest import *
class PythonBreakpointCommandSettingTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
my_var = 10
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
@python_api_test
@dsym_test
def test_step_out_with_dsym_python(self):
"""Test stepping out using avoid-no-debug with dsyms."""
self.buildDsym()
self.do_set_python_command_from_python()
@python_api_test
@dwarf_test
def test_step_out_with_dwarf_python(self):
"""Test stepping out using avoid-no-debug with dsyms."""
self.buildDwarf()
self.do_set_python_command_from_python ()
def setUp (self):
TestBase.setUp(self)
self.main_source = "main.c"
self.main_source_spec = lldb.SBFileSpec(self.main_source)
def do_set_python_command_from_python (self):
exe = os.path.join(os.getcwd(), "a.out")
error = lldb.SBError()
self.target = self.dbg.CreateTarget(exe)
self.assertTrue(self.target, VALID_TARGET)
body_bkpt = self.target.BreakpointCreateBySourceRegex("Set break point at this line.", self.main_source_spec)
self.assertTrue(body_bkpt, VALID_BREAKPOINT)
func_bkpt = self.target.BreakpointCreateBySourceRegex("Set break point at this line.", self.main_source_spec)
self.assertTrue(func_bkpt, VALID_BREAKPOINT)
PythonBreakpointCommandSettingTestCase.my_var = 10
error = lldb.SBError()
error = body_bkpt.SetScriptCallbackBody("\
import TestBreakpointCommandsFromPython\n\
TestBreakpointCommandsFromPython.PythonBreakpointCommandSettingTestCase.my_var = 20\n\
print 'Hit breakpoint'")
self.assertTrue (error.Success(), "Failed to set the script callback body: %s."%(error.GetCString()))
self.dbg.HandleCommand("command script import --allow-reload ./bktptcmd.py")
func_bkpt.SetScriptCallbackFunction("bktptcmd.function")
# We will use the function that touches a text file, so remove it first:
self.RemoveTempFile("output2.txt")
# Now launch the process, and do not stop at entry point.
self.process = self.target.LaunchSimple (None, None, self.get_process_working_directory())
self.assertTrue(self.process, PROCESS_IS_VALID)
# Now finish, and make sure the return value is correct.
threads = lldbutil.get_threads_stopped_at_breakpoint (self.process, body_bkpt)
self.assertTrue(len(threads) == 1, "Stopped at inner breakpoint.")
self.thread = threads[0]
self.assertTrue(PythonBreakpointCommandSettingTestCase.my_var == 20)
# Check for the function version as well, which produced this file:
# Remember to clean up after ourselves...
self.assertTrue(os.path.isfile("output2.txt"),
"'output2.txt' exists due to breakpoint command for breakpoint function.")
self.RemoveTempFile("output2.txt")
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
| apache-2.0 | Python | |
625548dfc54a7f0620a83f62435c6e246dc58d12 | Solve 18. | klen/euler | 018/solution.py | 018/solution.py | """ Project Euler problem #18. """
def problem():
    """ Solve the problem.

    Find the maximum total from top to bottom of the triangle below.

    Answer: 1074
    """
    triangle = """
    75
    95 64
    17 47 82
    18 35 87 10
    20 04 82 47 65
    19 01 23 75 03 34
    88 02 77 73 07 63 67
    99 65 04 28 06 16 70 92
    41 41 26 56 83 40 80 70 33
    41 48 72 33 47 32 37 16 94 29
    53 71 44 65 25 43 91 52 97 51 14
    70 11 33 28 77 73 17 78 39 68 17 57
    91 71 52 38 17 14 91 43 58 50 27 29 48
    63 66 04 68 89 53 67 30 73 16 69 87 40 31
    04 62 98 27 23 09 70 98 73 93 38 53 60 04 23
    """
    # BUG FIX: wrap map() in list() -- on Python 3 map() returns an
    # iterator, and grouper() below needs len() and indexing on each row.
    triangle = [
        list(map(int, l.strip().split()))
        for l in triangle.split('\n') if l.strip()]

    gen = iter(reversed(triangle))
    sums = next(gen)

    def grouper(nodes):
        # Yield each adjacent pair (nodes[n], nodes[n+1]).
        for n in range(len(nodes) - 1):
            yield nodes[n], nodes[n+1]

    # Dynamic programming from the bottom row up: each cell becomes its own
    # value plus the larger of its two children; sums[0] is the answer.
    for nodes in gen:
        sums = [s + max(nn) for s, nn in zip(nodes, grouper(sums))]
    return sums[0]
if __name__ == '__main__':
print problem()
| mit | Python | |
b647416b719c9f0b2534c13a67d3396fefaada47 | Add problem 1 sum muliples of 3 or 5 python solution | ChrisFreeman/project-euler | p001_multiples_of_3_and_5.py | p001_multiples_of_3_and_5.py | #
'''
Project Euler - Problem 1 - Multiples of 3 and 5
https://projecteuler.net/problem=1
If we list all the natural numbers below 10 that are multiples of 3 or 5, we
get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
'''
import sys
def main():
    '''Sum the numbers from 1 through 999 that are multiples of either 3 or 5.
    '''
    # First approach: materialise the multiples, then report count and sum.
    multiples = [n for n in range(1, 1000) if n % 3 == 0 or n % 5 == 0]
    msg = ("There are {0} numbers from 1 through 999 that are multiples of either"
           "3 or 5. Their sum is: {1}")
    print(msg.format(len(multiples), sum(multiples)))

    # Second approach: identical computation as a single generator
    # expression, with no intermediate list.
    alt_total = sum(n for n in range(1, 1000) if n % 3 == 0 or n % 5 == 0)
    alt_msg = ("Alternative: Sum of numbers 1 through 999 that are multiples of either"
               " 3 or 5: {0}")
    print(alt_msg.format(alt_total))
if __name__ == '__main__':
# interactive run main, capture keyboard interrupts
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| mit | Python | |
6c806f12129d132db17cf601335f638b82a814d6 | Create form.py | MateusJFabricio/ProjVeiculoMapeamentoAutomatico,MateusJFabricio/ProjVeiculoMapeamentoAutomatico | AutoMap/form.py | AutoMap/form.py | import turtle
import Tkinter as tk
def desenha(distancia, angulo, lousa):
    # Draw one ray: lift the pen, return the turtle ``lousa`` to the origin,
    # rotate ``angulo`` degrees counter-clockwise, then draw a straight
    # segment of length ``distancia``.
    lousa.penup()
    lousa.home()
    lousa.left(angulo)
    lousa.pendown()
    lousa.forward(distancia)
def main():
app = tk.Tk()
app.title("Mapeamento 2D de ambiente ")
app.fontePadrao = ("Arial", "10", "bold")
app.primeiroContainer = tk.Frame(app)
app.primeiroContainer["padx"] = 20
app.primeiroContainer.pack()
app.segundoContainer = tk.Frame(app)
app.segundoContainer["padx"] = 20
app.segundoContainer.pack()
app.lblControle = tk.Label(app.primeiroContainer, text="Controle de movimento", font=app.fontePadrao)
app.lblControle.pack(side=tk.TOP)
app.frente = tk.Button(app.primeiroContainer)
app.frente["text"] = "Frente"
app.frente["font"] = ("Calibri", "8")
app.frente["width"] = 12
#app.frente["command"] = self.verificaSenha
app.frente.pack()
app.direita = tk.Button(app.primeiroContainer)
app.direita["text"] = "Direita"
app.direita["font"] = ("Calibri", "8")
app.direita["width"] = 12
#app.direita["command"] = self.verificaSenha
app.direita.pack(side=tk.LEFT)
app.esquerda = tk.Button(app.primeiroContainer)
app.esquerda["text"] = "Esquerda"
app.esquerda["font"] = ("Calibri", "8")
app.esquerda["width"] = 12
#app.esquerda["command"] = self.verificaSenha
app.esquerda.pack(side=tk.RIGHT)
app.re = tk.Button(app.primeiroContainer)
app.re["text"] = "Re"
app.re["font"] = ("Calibri", "8")
app.re["width"] = 12
#app.re["command"] = self.verificaSenha
app.re.pack(side=tk.BOTTOM)
app.lblCanvas = tk.Label(app.segundoContainer, text="Visualizacao do mapa", font=app.fontePadrao)
app.lblCanvas.pack(side=tk.TOP)
canvas = tk.Canvas(app.segundoContainer,width=500,height=500)
canvas.pack()
mapa = turtle.RawTurtle(canvas)
for i in range(360):
desenha(180, i,mapa)
app.mainloop()
main()
| unlicense | Python | |
0c2f07fabb94698b8cf1b42a4f671ad0cd5e365f | Add migration for comment notification type | edofic/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,prasannav7/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core | src/ggrc/migrations/versions/20160321011353_3914dbf78dc1_add_comment_notification_type.py | src/ggrc/migrations/versions/20160321011353_3914dbf78dc1_add_comment_notification_type.py | # Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""
Add comment notification type
Create Date: 2016-03-21 01:13:53.293580
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from alembic import op
# revision identifiers, used by Alembic.
revision = '3914dbf78dc1'
down_revision = '50c374901d42'
NOTIFICATION_TYPES = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer),
column('advance_notice_end', sa.Integer),
column('created_at', sa.DateTime),
column('modified_by_id', sa.Integer),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
NOTIFICATIONS = [{
"name": "comment_created",
"description": "Notify selected users that a comment has been created",
"template": "comment_created",
"advance_notice": 0,
"instant": False,
}]
def upgrade():
    """Insert the comment_created notification type."""
    op.bulk_insert(NOTIFICATION_TYPES, NOTIFICATIONS)
def downgrade():
    """Delete exactly the notification types inserted by upgrade()."""
    # Match by name so unrelated rows are never removed.
    notification_names = [notif["name"] for notif in NOTIFICATIONS]
    op.execute(
        NOTIFICATION_TYPES.delete().where(
            NOTIFICATION_TYPES.c.name.in_(notification_names)
        )
    )
| apache-2.0 | Python | |
32e6a86f3e7cef04c67e1ae61db1959264d084bb | Add script for deep harvesting a nuxeo folder | barbarahui/nuxeo-calisphere,barbarahui/nuxeo-calisphere | s3stash/stash_folder.py | s3stash/stash_folder.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import logging
from s3stash.stash_collection import Stash
_loglevel_ = 'INFO'
def main(nxpath, pynuxrc="~/.pynuxrc", replace=True, loglevel=_loglevel_):
# set up logging
logfile = 'logs/stash_folder'
numeric_level = getattr(logging, loglevel, None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel)
# log to stdout/err to capture in parent process log
# TODO: save log to S3
logging.basicConfig(
level=numeric_level,
format='%(asctime)s (%(name)s) [%(levelname)s]: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
stream=sys.stderr)
logger = logging.getLogger(__name__)
stash = Stash(nxpath, pynuxrc, replace)
# stash images for use with iiif server
print 'stashing images...'
image_report = stash.images()
info = 'finished stashing images'
logger.info(info)
print info
report_file = "images.json"
print "report:\t{}\n".format(report_file)
# stash text, audio, video
print 'stashing non-image files (text, audio, video)...'
file_report = stash.files()
info = 'finished stashing files'
logger.info(info)
print info
report_file = "files.json"
print "report:\t{}\n".format(report_file)
# stash thumbnails for text, audio, video
print 'stashing thumbnails for non-image files (text, audio, video)...'
thumb_report = stash.thumbnails()
info = 'finished stashing thumbnails'
logger.info(info)
print info
report_file = "thumbs.json"
print "report:\t{}\n".format(report_file)
# stash media.json files
print 'stashing media.json files for collection...'
mediajson_report = stash.media_json()
info = 'finished stashing media.json'
logger.info(info)
print info
report_file = "mediajson.json"
print "report:\t{}\n".format(report_file)
# print some information about how it went
images_stashed = len(
[key for key, value in image_report.iteritems() if value['stashed']])
files_stashed = len(
[key for key, value in file_report.iteritems() if value['stashed']])
thumbs_stashed = len(
[key for key, value in thumb_report.iteritems() if value['stashed']])
mediajson_stashed = len([
key for key, value in mediajson_report.iteritems() if value['stashed']
])
summary = ''.join((
"SUMMARY:\n",
"objects processed: {}\n".format(len(stash.objects)),
"replaced existing files on s3: {}\n".format(stash.replace),
"images stashed: {}\n".format(images_stashed),
"files stashed: {}\n".format(files_stashed),
"thumbnails stashed: {}\n".format(thumbs_stashed),
"media.json files stashed: {}\n".format(mediajson_stashed),
)
)
print summary
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Deep harvest objects in a given folder in nuxeo')
parser.add_argument('path', help='nuxeo path')
parser.add_argument(
'--pynuxrc', default='~/.pynuxrc', help='rc file for use by pynux')
parser.add_argument(
'--replace',
action='store_true',
help='replace files on s3 if they already exist')
parser.add_argument('--loglevel', default=_loglevel_)
argv = parser.parse_args()
path = argv.path
pynuxrc = argv.pynuxrc
replace = argv.replace
loglevel = argv.loglevel
sys.exit(
main(
path, pynuxrc=pynuxrc, replace=replace, loglevel=loglevel))
| bsd-3-clause | Python | |
2a6f3eca3187f8e4ca078cb592bb324a735cc246 | Create solution.py | lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges | hackerrank/algorithms/sorting/easy/find_the_median/py/solution.py | hackerrank/algorithms/sorting/easy/find_the_median/py/solution.py | #!/bin/python
def partition(L, lo, hi):
    """Lomuto partition of L[lo:hi] around the pivot L[hi - 1].

    Rearranges the slice in place so everything smaller than the pivot
    precedes it, and returns the pivot's final index.
    """
    pivot_idx = hi - 1
    store = lo
    for scan in range(lo, hi):
        if L[scan] < L[pivot_idx]:
            L[scan], L[store] = L[store], L[scan]
            store += 1
    L[pivot_idx], L[store] = L[store], L[pivot_idx]
    return store
def median(L):
    """Return the median element of L via Hoare's quickselect.

    Partially reorders L in place; raises ValueError on an empty list.
    """
    if not L:
        raise ValueError("Empty sequence.")
    target = len(L) // 2
    lo, hi = 0, len(L)
    while lo < hi:
        p = partition(L, lo, hi)
        if p < target:
            lo = p + 1
        elif p > target:
            hi = p
        else:
            break
    return L[p]
size = int(raw_input())
L = [int(value) for value in raw_input().split()]
m = median(L)
print m
| mit | Python | |
6f7ed6f3b082c7f6399ab456a6f6b291219c910f | ADD migration scripts for uom prices | ingadhoc/product,ingadhoc/product | product_uom_prices/migrations/8.0.0.5.0/pre-migration.py | product_uom_prices/migrations/8.0.0.5.0/pre-migration.py | # -*- encoding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
def set_value(cr, model, table, field, value, condition):
print 'Set value %s on field %s on table %s' % (
value, field, table)
cr.execute('SELECT id '
'FROM %(table)s '
'%(condition)s' % {
'table': table,
'condition': condition,
})
for row in cr.fetchall():
model.write(cr, SUPERUSER_ID, row[0], {field: value})
def migrate(cr, version):
print 'Migrating product_uom_prices'
if not version:
return
registry = RegistryManager.get(cr.dbname)
model = 'product.template'
table = 'product_template'
field = "list_price_type"
value = "by_uom"
condition = "WHERE use_uom_prices"
set_value(
cr,
registry[model],
table,
field,
value,
condition,
)
| agpl-3.0 | Python | |
0f072c8d9cc5dd89d375bf96ed9436f70de8c9cb | Create chivey.py | th36r1m/Chivey | chivey.py | chivey.py | #!/usr/bin/python
import urllib2
import os
pic = 1
_path = []
currentDir = os.getcwd()
error = 0
year = 2013
month = 7
_time = 6
MAX_ERROR = 10
MIN_MONTH = 01
pathCount = 0
def menu():
    # Show the numbered category list and return the user's choice as an
    # int index into the module-level ``category`` list.  No validation is
    # performed; non-numeric input raises ValueError here.
    print 'Select a category to copy:'
    print
    print '[0] Fit Girls [6] Girls lingerie'
    print '[1] Sexy Bikinis [7] Russian Brides'
    print '[2] Burn Bra [8] Mind the Gap'
    print '[3] Chesty Girls [9] Redheads'
    print '[4] Mirror Girls [10] Tan Lines'
    print '[5] Hump Day [11] Sexy Chivers'
    print
    selection = int(raw_input('Selection: '))
    print
    return selection
year = int(raw_input("[+] Enter starting year: "))
month = int(raw_input("[+] Enter starting month: "))
_time = int(raw_input("[+] How many months back should I scrape? "))
monthbak = month
yearbak = year
_timebak = _time
_path.append(str(year) + str('%02d' % month))
if _time > 1:
while _time != 0:
if month == 1:
month=13
year-=1
month-=1
_path.append(str(year) + str('%02d' % month))
_time-=1
selection = menu()
category=['fit-girls-','sexy-bikinis-','burn-bra-','chesty-girls-flbp-','mirror-girls-','hump-day-','girls-lingerie-','russian-brides-','mind-the-gap-','redheaded-chivettes-','tan-lines-','sexy-chivers-','custom']
for yr in _path:
if not os.path.exists(yr): os.makedirs(yr)
while _time >= 0:
url=str("http://thechive.files.wordpress.com/" + str(yearbak) + "/" + str('%02d' % monthbak + "/" + category[selection] + str(pic) + ".jpg"))
fileName=str(category[selection] + str(pic) + ".jpg")
os.chdir(str(currentDir) + "/" + str(yearbak) + str('%02d' % monthbak))
try:
u = urllib2.urlopen(url)
except urllib2.URLError, e:
if e.code == 404:
print "No file detected at URL: " + str(pic)
error+=1
pic+=1
else:
f = open(fileName, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (fileName, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status,
f.close()
print "W00tW00t!! Another photo you say!! That makes " + str(pic) + "!"
error=0
pic+=1
if error == MAX_ERROR:
_timebak-=1
if monthbak == MIN_MONTH:
monthbak=13
yearbak-=1
monthbak-=1
error=0
pic=1
pathCount+=1
| mit | Python | |
121a80669b4b50665a7baafd3434cb3e574087f4 | Adjust phases to make campaign non-editable | jfterpstra/bluebottle,jfterpstra/bluebottle,onepercentclub/bluebottle,jfterpstra/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,jfterpstra/bluebottle,onepercentclub/bluebottle | bluebottle/bb_projects/migrations/0004_adjust_phases.py | bluebottle/bb_projects/migrations/0004_adjust_phases.py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from bluebottle.utils.model_dispatcher import get_model_mapping
MODEL_MAP = get_model_mapping()
class Migration(DataMigration):
    def forwards(self, orm):
        "Write your forwards methods here."
        # Make campaigns in the 'campaign' and 'done-complete' phases
        # non-editable by project owners.
        orm['bb_projects.ProjectPhase'].objects.filter(slug='campaign').update(editable=False)
        orm['bb_projects.ProjectPhase'].objects.filter(slug='done-complete').update(editable=False)
    def backwards(self, orm):
        "Write your backwards methods here."
        # Restore editability when rolling this data migration back.
        orm['bb_projects.ProjectPhase'].objects.filter(slug='campaign').update(editable=True)
        orm['bb_projects.ProjectPhase'].objects.filter(slug='done-complete').update(editable=True)
models = {
u'bb_projects.projectphase': {
'Meta': {'ordering': "['sequence']", 'object_name': 'ProjectPhase'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'editable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'owner_editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sequence': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'viewable': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'bb_projects.projecttheme': {
'Meta': {'ordering': "['name']", 'object_name': 'ProjectTheme'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['bb_projects']
symmetrical = True
| bsd-3-clause | Python | |
9b6f86cb2f4763625127a3d9d236238a4dd998ba | Create fileExamples.py | ReginaExMachina/royaltea-word-app,ReginaExMachina/royaltea-word-app | Bits/fileExamples.py | Bits/fileExamples.py | #!/usr/bin/env python
# CREATING A NEW FILE
file = open("newfile.txt", "w")
file.write("hello world in the new file\n")
file.write("and another line\n")
file.close()
# READING A FILE
file = open('newfile.txt', 'r')
print file.read() #Put n for the first n chars
# LOOPING OVER FILE
file = open('newfile.txt', 'r')
for line in file:
print line
# WRITING IN A FILE
file = open("newfile.txt", "w")
file.write("This is a test\n")
file.write("And here is another line\n")
file.close()
# EXAMPLES
with open("newfile.txt") as f:
for line in f:
print line,
| mit | Python | |
ae1839cbb521be5cb7e76d87bdd65f1e736ccf8d | Add python version of register-result with more robust json serialisation | panubo/docker-monitor,panubo/docker-monitor,panubo/docker-monitor | register-result.py | register-result.py | #!/usr/bin/env python
import json
import socket
import sys
# BUG FIX: the original tested ``len(sys.argv) < 4`` but unconditionally
# reads sys.argv[4] (status) below, raising IndexError when only three
# arguments were supplied.
if len(sys.argv) < 5:
    print("Error: Usage <register-result> <client> <name> <output> <status> <ttl>")
    sys.exit(128)

check_client = sys.argv[1]
check_name = sys.argv[2]
check_output = sys.argv[3]
check_status = int(sys.argv[4])
# TTL is optional and defaults to 90000 seconds.
check_ttl = int(sys.argv[5]) if len(sys.argv) > 5 else 90000

# Our result dict
result = {
    'source': check_client,
    'name': check_name,
    'output': check_output,
    'status': check_status,
    'ttl': check_ttl,
}

# TCP socket to the local agent
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('localhost', 3030)
sock.connect(server_address)
print (json.dumps(result))
# BUG FIX: the original called ``socket.sendall`` on the *module*; sendall
# is a method of the connected socket object.  Encode for Python 3 sockets.
sock.sendall(json.dumps(result).encode('utf-8'))
sock.close()
| mit | Python | |
1b36f7e837f6c15cab838edfaf6464bef0c88c6d | Add migration for request notification types | NejcZupec/ggrc-core,prasannav7/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,prasannav7/ggrc-core,selahssea/ggrc-core,prasannav7/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core | src/ggrc/migrations/versions/20160304124523_50c374901d42_add_request_notification_types.py | src/ggrc/migrations/versions/20160304124523_50c374901d42_add_request_notification_types.py |
"""Add request notification types
Revision ID: 50c374901d42
Revises: 4e989ef86619
Create Date: 2016-03-04 12:45:23.024224
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import column
from sqlalchemy.sql import table
# revision identifiers, used by Alembic.
revision = '50c374901d42'
down_revision = '1839dabd2357'
NOTIFICATION_TYPES = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer),
column('advance_notice_end', sa.Integer),
column('created_at', sa.DateTime),
column('modified_by_id', sa.Integer),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
def upgrade():
op.bulk_insert(
NOTIFICATION_TYPES,
[{
"name": "request_open",
"description": ("Notify all assignees Requesters Assignees and "
"Verifiers that a new request has been created."),
"template": "request_open",
"advance_notice": 0,
"instant": False,
}, {
"name": "request_declined",
"description": "Notify Requester that a request has been declined.",
"template": "request_declined",
"advance_notice": 0,
"instant": False,
}, {
"name": "request_manual",
"description": "Send a manual notification to the Requester.",
"template": "request_manual",
"advance_notice": 0,
"instant": False,
}]
)
def downgrade():
op.execute(
NOTIFICATION_TYPES.delete().where(
NOTIFICATION_TYPES.c.name.in_([
"request_open",
"request_declined",
"request_manual",
])
)
)
| apache-2.0 | Python | |
9665113ef9a6f7fae89b8a0b7b15289ac41996f4 | Create mySolution.py | CptDemocracy/Python | Puzzles/checkio/Home/Min-and-Max/mySolution.py | Puzzles/checkio/Home/Min-and-Max/mySolution.py | def minMaxArgs(key, operator, *args):
if key == None:
key = lambda x : x
minMaxVal = args[0]
for arg in args:
cmpKey = key(arg)
if operator(cmpKey, key(minMaxVal)):
minMaxVal = arg
return minMaxVal
def minMaxIter(iterable, operator, key):
if key == None:
key = lambda x : x
count = 0
for item in iterable:
if count == 0:
minMaxVal = item
count += 1
cmpKey = key(item)
if operator(cmpKey, key(minMaxVal)):
minMaxVal = item
return minMaxVal
def minmax(operator, *args, **kwargs):
if not hasattr(operator, "__call__"):
raise TypeError("operator must be callable")
if len(args) == 0:
raise TypeError("expected at least one argument, got 0 arguments")
key = lambda x : x
if "key" in kwargs:
key = kwargs["key"]
if not hasattr(key, "__call__"):
raise TypeError("%s is not callable" % type(key))
elif len(kwargs) > 0:
raise ValueError("unexpected keyword argument")
if len(args) == 1 and (hasattr(args[0], "__iter__") or hasattr(args[0], "__getitem__") or hasattr(args[0], "__next__")):
return minMaxIter(args[0], operator, key)
else:
return minMaxArgs(key, operator, *args)
def max(*args, **kwargs):
return minmax(lambda x, y: x > y, *args, **kwargs)
def min(*args, **kwargs):
return minmax(lambda x, y: x < y, *args, **kwargs)
| mit | Python | |
2a21ee4b263692872f11ebae18663119c5041d5e | Add test for requirements | openstack/bareon,openstack/bareon | fuel_agent/tests/test_requirements.py | fuel_agent/tests/test_requirements.py | # -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pkg_resources import require
def test_check_requirements_conflicts():
    # pkg_resources.require raises VersionConflict / DistributionNotFound
    # when the installed 'fuel_agent' distribution's declared requirements
    # cannot be satisfied -- so a plain call is the whole test.
    require('fuel_agent')
| apache-2.0 | Python | |
39c64ddf7bddb7110d6c85a5ad3c54bf95c334a2 | Create client.py | jasonblanks/pyCWStats | client.py | client.py | import sys, stat, os, re, time, base64, getpass, socket, smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
#Email settings
mail_user = "email@email.com"  # SMTP account (placeholder)
mail_pwd = "password"  # NOTE(review): credential committed in source control
FROM = 'email@email.com'
TO = ['email@email.com'] #must be a list
SUBJECT = "BETA TEST: GLSA Clearwell License update"
def send_email(gmail_user,gmail_pwd,FROM,TO,SUBJECT):
    # Build and send an HTML mail whose body is the license-status report
    # read from the network share.  NOTE(review): the gmail_user/gmail_pwd
    # parameters are never used -- the SMTP relay below is unauthenticated.
    msg = MIMEMultipart('alternative')
    msg['Subject'] = SUBJECT
    msg['From'] = FROM
    msg['To'] = ', '.join(TO)
    #msg['To'] = TO
    # Read the report and collapse it onto one line (sent as HTML anyway).
    with open ("\\\\jason\\tmp\\lic_stat.txt", "r") as myTEXT:
        TEXT=myTEXT.read().replace('\n', '')
    #TEXT = open ("\\\\jason\\tmp\\lic_stat.txt", 'r')
    #TEXT = printl(ServerList)
    part1 = MIMEText(TEXT, 'html')
    msg.attach(part1)
    if TEXT != None:  # NOTE(review): always true -- read() never returns None
        try:
            server = smtplib.SMTP("smtpout.server.com", 25) #or port 465 doesn't seem to work!
            server.ehlo()
            server.sendmail(FROM, TO, msg.as_string())
            server.close()
            print 'successfully sent the mail'
        except (RuntimeError, TypeError, NameError):
            # NOTE(review): SMTP/network failures (smtplib.SMTPException,
            # socket.error) are NOT in this tuple and will propagate.
            pass
def start_job():
    """Signal the server side by dropping an empty 'job_start' marker file
    on the network share.

    The original wrote ``startjob.close`` without parentheses -- a no-op
    attribute access that never closed the handle; a context manager
    guarantees the file is closed.
    """
    with open("\\\\jason\\tmp\\job_start", 'w'):
        pass
job = 1
start_job()
# NOTE(review): structure reconstructed from a whitespace-mangled source --
# the loop body appears to run exactly once (job is cleared and sys.exit()
# is called on the first pass); confirm against the original file.
while job:
    time.sleep(10)
    def file_check():
        # Wait for the server to drop the 'job_complete' marker, delete it;
        # on failure (file locked / vanished) retry recursively.
        for f in os.listdir("\\\\jason\\tmp"):
            if f == "job_complete":
                try:
                    time.sleep (5)
                    os.remove("\\\\jason\\tmp\\job_complete")
                except:
                    file_check()
    '''
    for f in os.listdir("\\\\jason\\tmp"):
        if f == "job_complete":
            send_email(mail_user,mail_pwd,FROM,TO,SUBJECT)
            try:
                os.remove("\\\\jason\\tmp\\job_complete")
            except:
                file = True
                while file:
                    for f in os.listdir("\\\\jason\\tmp"):
                        if f == "job_complete":
                            file = True
                            break
                        else:
                            file = False
                    os.remove("\\\\jason\\tmp\\job_complete")
    '''
    job = 0
    send_email(mail_user,mail_pwd,FROM,TO,SUBJECT)
    sys.exit()
file_check()  # NOTE(review): unreachable after sys.exit() above
| apache-2.0 | Python | |
1bd9013c925cfbbebcff33bf7796fde729d26b34 | add cardify script | district10/blog,district10/blog,district10/blog,district10/blog | cardify.py | cardify.py | import os
import re
import sys
import glob
import errno
import shutil
from typing import Dict, Tuple, Union, Any, List, Optional
from pprint import pprint
def mkdir_p(path: str) -> None:
    """Create *path* and any missing parents, like ``mkdir -p``.

    Silently succeeds when the directory already exists; any other
    OSError is re-raised.  (The original error path called an undefined
    ``logger`` name, turning every real failure into a NameError.)
    """
    path = os.path.abspath(path)
    try:
        os.makedirs(path)
    except OSError as e:
        # Already-exists is fine; everything else is fatal.
        if not (e.errno == errno.EEXIST and os.path.isdir(path)):
            raise
pwd = os.path.abspath(os.path.dirname(__file__))
output_dir = f'{pwd}/notes/cards'
shutil.rmtree(output_dir)  # NOTE(review): raises FileNotFoundError on a first run (dir absent)
mkdir_p(output_dir)
index = 0  # sequential card counter used by write_note()
def write_note(text: str) -> str:
    """Write one card file for *text* and return its path.

    Notes with fewer than two newlines are skipped (implicitly returning
    None, despite the ``-> str`` annotation).  Relies on the module-level
    ``index`` counter for sequential zero-padded file names.
    """
    global index
    if text.count('\n') < 2:
        return  # NOTE(review): returns None here, contradicting the annotation
    index += 1
    p = f'{output_dir}/card_{index:010d}.md'
    with open(p, 'w') as f:
        # Lead each card with vertical spacing, then render as a list item.
        prefix = ''.join(['<br>'] * 6)
        f.write(f'{prefix}\n\n- ')
        f.write(text)
    return p
if __name__ == '__main__':
    # Split every note file on top-level "- " bullets and emit one card each.
    for path in glob.glob(f'{pwd}/notes/**/*.md'):
        if path.endswith('index.md') or path.startswith(output_dir):
            continue
        print(f'processing {path}...')
        with open(path) as f:
            for i, p in enumerate(re.split('\n- ', f.read())[1:]):
                write_note(p)
    # Build an index.md linking to every generated card.
    cards = sorted(glob.glob(f'{output_dir}/card*.md'))
    print(f'wrote #{len(cards)} cards, writing index.md...', end=' ')
    with open(f'{output_dir}/index.md', 'w') as f:
        f.write('# Cards\n')
        for card in cards:
            basename = os.path.basename(card)
            f.write(f'\n- [{basename}]({basename})')
    print('done')
fddbbc536ad5097769d924d49420e7d5d2e5999f | Update app/extensions/minify/__init__.py | apipanda/openssl,apipanda/openssl,apipanda/openssl,apipanda/openssl | app/extensions/minify/__init__.py | app/extensions/minify/__init__.py | from htmlmin import Minifier
class HTMLMIN(object):
    """Flask extension that minifies text/html responses via htmlmin."""

    def __init__(self, app=None, **kwargs):
        """Optionally bind to *app* now; extra kwargs override the
        default htmlmin.Minifier options."""
        self.app = app
        if app is not None:
            self.init_app(app)
        options = dict(
            remove_comments=True,
            reduce_empty_attributes=True,
            remove_optional_attribute_quotes=False,
        )
        options.update(kwargs)
        self.html_minify = Minifier(**options)

    def init_app(self, app):
        """Register the after-request hook unless MINIFY_PAGE is disabled."""
        app.config.setdefault('MINIFY_PAGE', True)
        if app.config['MINIFY_PAGE']:
            app.after_request(self.response_minify)

    def response_minify(self, response):
        """Minify an HTML response in place; pass others through untouched."""
        if response.content_type != u'text/html; charset=utf-8':
            return response
        response.direct_passthrough = False
        raw = response.get_data(as_text=True)
        response.set_data(self.html_minify.minify(raw))
        return response
| mit | Python | |
118a4af7fbc2455d1dcde54e7041a3919f760d69 | Create switch_controls_snmp.py | kylehogan/hil,apoorvemohan/haas,meng-sun/hil,SahilTikale/haas,apoorvemohan/haas,henn/hil_sahil,henn/hil,lokI8/haas,CCI-MOC/haas,SahilTikale/switchHaaS,henn/hil_sahil,meng-sun/hil,kylehogan/hil,kylehogan/haas,henn/hil,henn/haas | python-mocutils/mocutils/switch_controls_snmp.py | python-mocutils/mocutils/switch_controls_snmp.py | #! /usr/bin/python
import os
def make_remove_vlans(vlan_ids, add, switch_ip='192.168.0.1', community='admin'):
    """Create (add=True) or destroy (add=False) each VLAN in *vlan_ids*.

    *vlan_ids* is a comma-separated string of VLAN numbers; each one is
    pushed to the switch with an snmpset shell command.
    """
    oid_port_vlan_id = '1.3.6.1.4.1.11863.1.1.4.3.1.1.2.1.1'
    oid_port_vlan_status = '1.3.6.1.4.1.11863.1.1.4.3.1.1.2.1.6'
    base = 'snmpset -v1 -c' + community + ' ' + switch_ip + ' '
    for vid in vlan_ids.split(','):
        if add:
            # Set the VLAN id, then mark the row active (status 4 = createAndGo).
            os.system(base + oid_port_vlan_id + '.' + vid + ' i ' + vid)
            os.system(base + oid_port_vlan_status + '.' + vid + ' i 4')
        else:
            # Status 6 = destroy.
            os.system(base + oid_port_vlan_status + '.' + vid + ' i 6')
def edit_ports_on_vlan(port_ids, vlan_id, add, switch_ip='192.168.0.1', community='admin'):
    """Add (add=True) or remove (add=False) *port_ids* on VLAN *vlan_id*.

    *port_ids* is a comma-separated string; the whole list is sent as a
    single quoted snmpset string argument.
    """
    oid_untag_member_add = '1.3.6.1.4.1.11863.1.1.4.3.1.1.2.1.4'
    oid_member_remove = '1.3.6.1.4.1.11863.1.1.4.3.1.1.2.1.5'
    oid = oid_untag_member_add if add else oid_member_remove
    command = ('snmpset -v1 -c' + community + ' ' + switch_ip + ' '
               + oid + '.' + vlan_id + ' s ' + '"' + port_ids + '"')
    os.system(command)
| apache-2.0 | Python | |
f009f42c168e396e437e08914dc28eb1e08fb7fe | test of c++ wavefront code on big donut | aaronroodman/Donut,aaronroodman/Donut,aaronroodman/Donut | test/test-bigdonut-cpp.py | test/test-bigdonut-cpp.py | ###
### Script for fitting a BIG donut
###
import numpy as np
from donutlib.donutfit import donutfit
fitinitDict = {"nZernikeTerms":15,"fixedParamArray1":[0,1,0,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"fixedParamArray2":[0,1,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"fixedParamArray3":[0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"nFits":3,"nPixels":256,"nbin":2048,"scaleFactor":1.0,"pixelOverSample":8,"iTelescope":0,"inputrzero":0.15,"outputWavefront":True,"debugFlag":False,"gain":4.5,"wavefrontMapFile" : "/Users/roodman/Astrophysics/Donuts/decam_2012-nominalzernike.pickle", "doGridFit":True, "spacing":64}
df = donutfit(**fitinitDict)
# fit donut
fitDict = {}
fitDict["inputFile"] = 'DECam_00236392.S4.0003.stamp.fits'
fitDict["outputPrefix"] = 'DECam_wave_00236392.S4.0003'
fitDict["inputrzero"] = 0.125
fitDict["inputZernikeDict"] = {"S4":[0.0,0.0,53.0],"None":[0.0,0.0,11.0]}
df.setupFit(**fitDict)
df.gFitFunc.closeFits()
| mit | Python | |
2f2e7605d87ef06c547df660805abb99835dee18 | Add a snippet. | jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets | python/pyqt/pyqt5/widget_QAction.py | python/pyqt/pyqt5/widget_QAction.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QAction, QSizePolicy
app = QApplication(sys.argv)
# The default constructor has no parent.
# A widget with no parent is a window.
window = QMainWindow()
window.setWindowTitle('Hello')
label = QLabel("Press Ctrl+P to print a message on the terminal", window)
label.resize(800, 100)
# Set key shortcut ################################
def action_callback():
    # Fired when the Ctrl+P QAction triggers.
    print("Hello!")
# see https://stackoverflow.com/a/17631703 and http://doc.qt.io/qt-5/qaction.html#details
action = QAction(label) # <-
action.setShortcut(Qt.Key_P | Qt.CTRL) # <-
action.triggered.connect(action_callback) # <-
label.addAction(action) # <-
###################################################
window.show()
# The mainloop of the application. The event handling starts from this point.
# The exec_() method has an underscore. It is because the exec is a Python keyword. And thus, exec_() was used instead.
exit_code = app.exec_()
# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
| mit | Python | |
75a9584cc859d60c598582b382f41bd685579072 | add a new config file at the project root | heprom/pymicro | config.py | config.py | import os
# NOTE(review): uses the process cwd, not the package location -- these paths
# only resolve when running from the project root; confirm that is intended.
PYMICRO_ROOT_DIR = os.path.abspath(os.curdir)
PYMICRO_EXAMPLES_DATA_DIR = os.path.join(PYMICRO_ROOT_DIR, 'examples', 'data')
PYMICRO_XRAY_DATA_DIR = os.path.join(PYMICRO_ROOT_DIR, 'pymicro', 'xray', 'data')
8212faa90328daabb85c7e877942a667aa200119 | add config.py | sljeff/JBlog,sljeff/JBlog,sljeff/JBlog | config.py | config.py | import configparser
import datetime
__all__ = ['blog_name', 'categories', 'dates', 'article_num']
config = configparser.ConfigParser()
config.read('blog.ini', encoding='utf-8')
DEFAULT = config['DEFAULT']
blog_name = DEFAULT.get('blog_name', "No Name Here")
pre_category_name = DEFAULT.get('category_name', "") # type: str
pre_category_links = DEFAULT.get('category_links', "") # type: str
from_date = DEFAULT.get('from_date', '201610')
# year and month from config; from_date is formatted YYYYMM
from_year = int(from_date[:4])
from_month = int(from_date[4:])
assert 1 <= from_month <= 12 and 1970 < from_year < 9999, 'wrong from_date'
# create from_datetime and to_datetime (archive range: config start -> now)
from_datetime = datetime.datetime(from_year, from_month, 1)
to_datetime = datetime.datetime.now()
# add_month_num: width of each archive bucket, in months
add_month_num = 3
# a function that get year and month after add months
def add_month(year, month, add_num):
    """Return ``(year, month)`` shifted by *add_num* months.

    Generalized with divmod so any positive or negative offset works;
    the original if/elif only handled overflows of at most 12 months.
    Backward compatible for every input the original handled correctly.
    """
    year_delta, month_index = divmod(month - 1 + add_num, 12)
    return year + year_delta, month_index + 1
# get dates: one (link, label) tuple per add_month_num-month archive bucket,
# where the link embeds the bucket's [start, end) POSIX timestamps.
dates = []
while from_datetime < to_datetime:
    link = '/t/' + str(from_datetime.timestamp())
    name_start = '{}年{}月'.format(str(from_datetime.year), str(from_datetime.month))
    new_year, new_month = add_month(from_datetime.year, from_datetime.month, add_month_num)
    from_datetime = datetime.datetime(new_year, new_month, 1)  # new from_datetime
    to_year, to_month = add_month(from_datetime.year, from_datetime.month, -1)
    name_end = '{}年{}月'.format(str(to_year), str(to_month))
    link += '_to_' + str(from_datetime.timestamp())
    dates.append((link, name_start+' 至 '+name_end))
# get categories: pipe-separated names/links from the config, zipped together
category_name = [x.strip() for x in pre_category_name.split('|')]
category_links = ['/c/'+x.strip() for x in pre_category_links.split('|')]
categories = list(zip(category_links, category_name))
# get article_num (raises KeyError if missing from blog.ini)
article_num = DEFAULT['article_num']
| mit | Python | |
5259453165cca4767743469b5e77c6eabe444839 | add config.py | samtx/whatsmyrankine,samtx/whatsmyrankine,samtx/whatsmyrankine,samtx/whatsmyrankine,samtx/whatsmyrankine | config.py | config.py | class Config(object):
DEBUG = False
TESTING = False
CSRF_ENABLED = True
SECRET_KEY = 'this-really-needs-to-be-changed'
class ProductionConfig(Config):
DEBUG = False
class StagingConfig(Config):
DEVELOPMENT = True
DEBUG = True
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
class TestingConfig(Config):
TESTING = True | mit | Python | |
979d0906ba1bc7f3ec3e77a6e09ec8a1a2449323 | add clean config.py | will-iam/Variant,will-iam/Variant,will-iam/Variant | config.py | config.py | import os
workspace = os.getcwd()
gnu_CC = 'gcc'
gnu_CXX = 'g++'
clang_CC = 'clang'
clang_CXX = 'clang++'
intel_CC = 'icc'
intel_CXX = 'icpc'
mpi_CC = 'mpicc'
mpi_CXX = 'mpic++'
# keywords are: $mpi_nprocs, $ncores
mpi_RUN = 'mpirun -hostfile hostfile -np $mpi_nprocs'
core_per_node = 2
# tmp dir to launch a run.
tmp_dir = os.path.join(workspace, 'tmp')
# tmp dir to store results after a run.
results_dir = os.path.join(workspace, 'results')
## Cases directory, makes life easier when they're distinct.
case_ic = os.path.join(workspace, 'casepy') # Where the python scripts to build initial condition are.
case_input = os.path.join(workspace, 'caseinput') # Where the built initial conditions are (heady)
case_ref = os.path.join(workspace, 'caseref') # Where the references (results to compare with) are
#What you can add in your hostfile for mpirun.
#localhost slots=4
| mit | Python | |
47b88e59781cf2aeb1a4bb3b6b97ceaf6b883820 | Add prime count | sitdh/com-prog | cpp_10.py | cpp_10.py | first_number = int(input())
if 0 == int(first_number):
print('none')
exit()
prime_count = ''
while True:
if 2 == first_number:
prime_count = '2'
break
running_number = first_number
divider = first_number // 2 if ( 0 == first_number % 2 ) else ( first_number // 2 ) + 1;
count = 0
while divider != 1:
if 0 == running_number % divider:
count += 1
divider -= 1
if count == 0:
prime_count += ' ' + str( running_number )
first_number -= 1
if 1 == first_number:
break
p = ''
for c in prime_count.strip().split():
p = c + ' ' + p
print( p.strip() )
| mit | Python | |
ccac9cddfad2b883fc8e2c7c8ab27607ba8c4c63 | Create config.py | ThisIsAmir/TweenRoBot,ThisIsAmir/TweenRoBot | config.py | config.py | token = '252128496:AAHUDCZJlHpd21b722S4B_n6prn8RUjy4'
is_sudo = '223404066' #@This_Is_Amir
relam = '-133494595'
# ___ __ __ _ _ _ _____
# / _ \ / _|/ _| | (_) \ | | __|_ _|__ __ _ _ __ ___
#| | | | |_| |_| | | | \| |/ _ \| |/ _ \/ _ | _ _ \
#| |_| | _| _| |___| | |\ | __/| | __/ (_| | | | | | |
#\___/|_| |_| |_____|_|_| \_|\___||_|\___|\__,_|_| |_| |_|
| mit | Python | |
9fae2d4c7ecc35bde8079f5a71a2b369690cd9a3 | add config.py | MinnPost/salesforce-stripe,texastribune/salesforce-stripe,texastribune/salesforce-stripe,MinnPost/salesforce-stripe,texastribune/salesforce-stripe,MinnPost/salesforce-stripe | config.py | config.py | import os
import stripe
stripe_keys = {
'secret_key': os.environ['SECRET_KEY'],
'publishable_key': os.environ['PUBLISHABLE_KEY']
}
SALESFORCE = {
"CLIENT_ID": os.environ[ 'SALESFORCE_CLIENT_ID' ],
"CLIENT_SECRET": os.environ[ 'SALESFORCE_CLIENT_SECRET' ],
"USERNAME": os.environ[ 'SALESFORCE_USERNAME' ],
"PASSWORD": os.environ[ 'SALESFORCE_PASSWORD' ],
"HOST": os.environ[ "SALESFORCE_HOST" ]
}
| mit | Python | |
298f7d65ba29a0524ff2a3f8eb4b564ed91ad057 | Document find_by_name so I remember what to do with it. | brantai/python-rightscale,diranged/python-rightscale-1 | rightscale/util.py | rightscale/util.py | import os.path
import ConfigParser
CFG_USER_RC = '.rightscalerc'
CFG_SECTION_OAUTH = 'OAuth'
CFG_OPTION_ENDPOINT = 'api_endpoint'
CFG_OPTION_REF_TOKEN = 'refresh_token'
_config = None
class HookList(list):
pass
class HookDict(dict):
pass
def get_config():
global _config
if not _config:
_config = ConfigParser.SafeConfigParser()
# set up some defaults - too bad only newer pythons know how to do this
# more gracefully:
_config.add_section(CFG_SECTION_OAUTH)
_config.set(CFG_SECTION_OAUTH, CFG_OPTION_ENDPOINT, '')
_config.set(CFG_SECTION_OAUTH, CFG_OPTION_REF_TOKEN, '')
home = os.path.expanduser('~')
rc_file = os.path.join(home, CFG_USER_RC)
_config.read(rc_file)
return _config
def get_rc_creds():
"""
Reads ~/.rightscalerc and returns API endpoint and refresh token.
Always returns a tuple of strings even if the file is empty - in which
case, returns ``('', '')``.
"""
config = get_config()
try:
return (
config.get(CFG_SECTION_OAUTH, CFG_OPTION_ENDPOINT),
config.get(CFG_SECTION_OAUTH, CFG_OPTION_REF_TOKEN),
)
except:
return ('', '')
def find_href(obj, rel):
for l in obj.get('links', []):
if l['rel'] == rel:
return l['href']
def find_by_name(collection, name):
"""
:param rightscale.ResourceCollection collection: The collection in which to
look for :attr:`name`.
:param str name: The name to look for in collection.
"""
params = {'filter[]': ['name==%s' % name]}
found = collection.index(params=params)
if len(found) > 1:
raise ValueError("Found too many matches for %s" % name)
return found[0]
| import os.path
import ConfigParser
CFG_USER_RC = '.rightscalerc'
CFG_SECTION_OAUTH = 'OAuth'
CFG_OPTION_ENDPOINT = 'api_endpoint'
CFG_OPTION_REF_TOKEN = 'refresh_token'
_config = None
class HookList(list):
pass
class HookDict(dict):
pass
def get_config():
global _config
if not _config:
_config = ConfigParser.SafeConfigParser()
# set up some defaults - too bad only newer pythons know how to do this
# more gracefully:
_config.add_section(CFG_SECTION_OAUTH)
_config.set(CFG_SECTION_OAUTH, CFG_OPTION_ENDPOINT, '')
_config.set(CFG_SECTION_OAUTH, CFG_OPTION_REF_TOKEN, '')
home = os.path.expanduser('~')
rc_file = os.path.join(home, CFG_USER_RC)
_config.read(rc_file)
return _config
def get_rc_creds():
"""
Reads ~/.rightscalerc and returns API endpoint and refresh token.
Always returns a tuple of strings even if the file is empty - in which
case, returns ``('', '')``.
"""
config = get_config()
try:
return (
config.get(CFG_SECTION_OAUTH, CFG_OPTION_ENDPOINT),
config.get(CFG_SECTION_OAUTH, CFG_OPTION_REF_TOKEN),
)
except:
return ('', '')
def find_href(obj, rel):
for l in obj.get('links', []):
if l['rel'] == rel:
return l['href']
def find_by_name(res, name):
params = {'filter[]': ['name==%s' % name]}
found = res.index(params=params)
if len(found) > 1:
raise ValueError("Found too many matches for %s" % name)
return found[0]
| mit | Python |
fc7da8e039c38140f3855e8c58d1db9a4e8ed133 | add demo about using ftplib.FTP | ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study | reading-notes/CorePython/src/ftp.py | reading-notes/CorePython/src/ftp.py | # Copyright (c) 2014 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import ftplib
import os
import socket
HOST = 'ftp.mozilla.org'
DIRN = 'pub/mozilla.org/webtools'
FILE = 'bugzilla-LATEST.tar.gz'
def main():
    # Anonymous-FTP download of FILE from HOST/DIRN into the cwd,
    # printing a status message and bailing out at each failure point.
    try:
        f = ftplib.FTP(HOST)
    except (socket.error, socket.gaierror), e:
        print 'ERROR: cannot reach "%s"' % HOST
        return
    print '*** Connected to host "%s"' % HOST
    try:
        f.login()  # anonymous login (no credentials)
    except ftplib.error_perm:
        print 'ERROR: cannot login anonymously'
        f.quit()
        return
    print '*** Logined in as "anonymously"'
    try:
        f.cwd(DIRN)
    except ftplib.error_perm:
        print 'ERROR: cannot cd to "%s"' % DIRN
        f.quit()
        return
    print '*** Changed to "%s" folder' % DIRN
    try:
        # Stream the remote file straight into a same-named local file.
        f.retrbinary('RETR %s' % FILE, open(FILE, 'wb').write)
    except ftplib.error_perm:
        print 'ERROR: cannot read file "%s"' % FILE
        os.unlink(FILE)  # drop the empty/partial local file
    else:
        print '*** Downloaded "%s" to cwd' % FILE
    f.quit()
    return
if __name__ == '__main__':
    main()
| bsd-2-clause | Python | |
d059fa531f46fe063e7811a17478fab6c913acb4 | add migration file | sigmapi-gammaiota/sigmapi-web,sigmapi-gammaiota/sigmapi-web,sigmapi-gammaiota/sigmapi-web,sigmapi-gammaiota/sigmapi-web | sigmapiweb/apps/Scholarship/migrations/0006_course_coursesection_review.py | sigmapiweb/apps/Scholarship/migrations/0006_course_coursesection_review.py | # Generated by Django 3.1.6 on 2021-11-18 13:53
import apps.Scholarship.models
import common.mixins
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Scholarship', '0005_change_on_delete'),
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('catalog_code', models.CharField(max_length=10, unique=True, validators=[django.core.validators.RegexValidator(regex='[A-Z]+[0-9]+')])),
('title', models.CharField(max_length=100)),
],
bases=(common.mixins.ModelMixin, models.Model),
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('stars', models.IntegerField(validators=[apps.Scholarship.models.validate_stars])),
('text', models.CharField(max_length=1000)),
('reviewer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
bases=(common.mixins.ModelMixin, models.Model),
),
migrations.CreateModel(
name='CourseSection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('term', models.CharField(choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ('E', 'E'), ('S', 'S'), ('F', 'F')], default='A', max_length=1)),
('year', models.PositiveIntegerField(validators=[django.core.validators.MaxValueValidator(99)])),
('professor', models.CharField(max_length=100)),
('catalog_course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Scholarship.course', to_field='catalog_code')),
('participants', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
],
bases=(common.mixins.ModelMixin, models.Model),
),
]
| mit | Python | |
2085083fc842c03efae72bbf288804ddd67605b1 | add list_comprehension | Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python | misc/list_comprehension.py | misc/list_comprehension.py | #!/usr/bin/env python
s = [2*x for x in range(101) if x ** 2 > 3]
print s
| mit | Python | |
9e60fd94ef801bab0e8e9a5956b5c00c911bd6ca | Create tesseract_example.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/kyleclinton/tesseract_example.py | home/kyleclinton/tesseract_example.py | ################################################################
#
# tesseract_example.py
# Kyle J Clinton
#
# This is an example of the use of TesseractOcr to read text from an image
# it is using many of the services that are common to the InMoov project or
# MRL in general
#
################################################################
### Start and Setup OpenCV
opencv=Runtime.start("opencv","OpenCV")
opencv.captureFromResourceFile("http://192.168.1.130:8080/stream/video.mjpeg");
# This Grabber does not seem to set correctly inside of OpenCV !?!?!
opencv.setFrameGrabberType("org.myrobotlab.opencv.MJpegFrameGrabber");
opencv.capture()
### Start and Setup Tesseract (Not as a filter inside OpenCV)
tesseract = Runtime.createAndStart("tesseract","TesseractOcr")
### Start and Setup MarySpeech (You could switch out for your favorite TextToSpeech service)
mouth = Runtime.createAndStart("MarySpeech", "MarySpeech")
mouth.setVoice("cmu-bdl-hsmm")
### This is my little mod to the voice to make it specifically "Junior's Voice"
mouth.setAudioEffects("TractScaler(amount=1.4) + F0Add(f0Add=60.0) + Robot(amount=8.0) ")
def readTextFromImage():
    # OCR a fixed test image with the Tesseract service and speak the
    # recognised text through the MarySpeech mouth (blocking).
    tesseract = Runtime.createAndStart("tesseract","TesseractOcr")
    txtStr = tesseract.ocr("20170908_141852.jpg")
    print("tess results: ", txtStr)
    mouth.speakBlocking(txtStr)
## Not sure why I need to cleanup string for image name???
####imgNameStr = opencv.recordSingleFrame()
####imgNameStr = imgNameStr.replace("u'", "").replace("'", "")
####print("captured image: ", imgNameStr)
####txtStr = tesseract.ocr(imgNameStr)
## For Testing
#txtStr = tesseract.ocr("20170908_141852.jpg")
## Cleanup of the string is required and this is very basic and needs to be more robust!
#txtStr = txtStr.replace("\n", " ").replace(":", " ")
#print("tess results: ", txtStr)
#mouth.speakBlocking(txtStr)
| apache-2.0 | Python | |
058de6743532340611ac304c99bc7dd4ea474350 | Create NSEPA-Bypass.py | Lucky0x0D/NetScalerEPABypass | NSEPA-Bypass.py | NSEPA-Bypass.py | import sys
import base64
import hashlib
## Requires pyCrypto --> run 'pip install pycrypto'
from Crypto.Cipher import AES
## Check that theres is enough info
if (len(sys.argv) < 5):
print("You're not giving me enough to work with here:\n\n");
print("Usage:\n");
print("python NSEPA-Bypass.py \"NSC_EPAC Cookie Value\" \"EPOCH Time from client\" \"Value of the HOST: Header\" \"Base64 encoded string from Server\"\n\n\n");
print("Example:\n");
print("python NSEPA-Bypass.py \"981005eef29ce34c80f535f9e78f4b4d\" \"1498797356\" \"vpn.example.com\" \"WWoNstbK760pVoPwPzHbs9pEf6Tj/iBk55gnHYwptPohBR0bKsiVVZmDN8J8530G4ISIFkRcC/1IaQSiOr8ouOYC84T5Hzbs2yH3Wq/KToo=\" \n\n\n");
exit(1);
## Set up the variables.
key = ""
hexcookie=""
cookie = sys.argv[1]
epoch = sys.argv[2]
host = sys.argv[3]
EPAcrypt64 = sys.argv[4]
EPAcrypt = base64.b64decode(EPAcrypt64)
## Take the cookie string and load it as hex
for i in range(0, len(cookie), 2):
hexcookie= hexcookie + chr( int(cookie[i:i+2],16))
## Build the key source
keystring = "NSC_EPAC=" + cookie + "\r\n" + epoch + "\r\n" + host + "\r\n" + hexcookie
## Hash the key source
hashedinput = hashlib.sha1(keystring).hexdigest()
## load the hex of the ascii hash
for i in range(0, len(hashedinput), 2):
key = key + chr( int(hashedinput[i:i+2],16))
## Take the first 16 bytes of the key
key = key[:16]
print "\n"
print "The key for this session is:\n"
print ' '.join(x.encode('hex') for x in key)
print "\n"
## Decryption if encrypted BASE64 Provided
decryption_suite = AES.new(key, AES.MODE_CBC, hexcookie)
decrypted = decryption_suite.decrypt(EPAcrypt).strip()
print "The NetScaler Gateway EPA request: \n\r" + decrypted
print "\n"
## Figure out how many '0's to respond with
## (semi-colon is the EPA request delimiter)
CSECitems = (decrypted.count(';'))
#Add PKCS5 Padding (string to be encrypted must be a multiple of 16 bytes)
padding=16-(decrypted.count(';'))
response = (chr(48)*CSECitems)+(chr(padding)*padding)
## Encryption
encryption_suite = AES.new(key, AES.MODE_CBC, hexcookie)
print "Replace your current CSEC header with: \nCSEC: " + base64.b64encode(encryption_suite.encrypt(response))
print "\n"
| unlicense | Python | |
b3a1f84fb6f28598595f00bdb01d789051999cb9 | Update 2016-09-19 11h20 | HuuHoangNguyen/Python_learning | GUI_Tkinter_Demo.py | GUI_Tkinter_Demo.py | #!/usr/bin/python
import Tkinter
import tkMessageBox
top = Tkinter.Tk()
def helloCallBack():
tkMessageBox.showinfo("Hello Python", "Hello World")
B = Tkinter.Button(top, text="Hello", command = helloCallBack)
B.pack()
top.mainloop() | mit | Python | |
e397d400a81466b22ae735f60f5a239ca4b7d653 | create domain lookup module | jasonaowen/irc | domain.py | domain.py | # domain.py
# Look up a domain's availability
# Copyright 2015 Jason Owen <jason.a.owen@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import xmlrpclib
from twisted.internet import reactor
def repeatLookup(handler, client, channel, domain):
handler.doLookup(client, channel, domain)
class DomainHandler:
def __init__(self, args):
self.apiKey = args["apiKey"]
self.api = xmlrpclib.ServerProxy('https://rpc.gandi.net/xmlrpc/')
def channelMessage(self, client, channel, name, message):
if message.lower().find("!domain") == 0:
domain = message.split(' ')[1].lower()
self.doLookup(client, channel, domain)
return True
return False
def doLookup(self, client, channel, domain):
self.log(client, "looking up domain: %s" % (domain,))
result = self.api.domain.available(self.apiKey, [domain])
self.log(client, "Got result: %s" % (result,))
self.printOrCallback(client, channel, domain, result)
def printOrCallback(self, client, channel, domain, result):
if (result[domain] == 'pending'):
self.log(client, "Scheduling callback for domain %s" % (domain,))
reactor.callLater(1, repeatLookup, self, client, channel, domain)
else:
self.log(client, "Domain %s is %s" % (domain, result[domain],))
client.say(channel, "The domain %s is %s." % (domain, result[domain],))
def log(self, client, message):
print "%s %s: %s" % (client.nickname, self.now(), message,)
def now(self):
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
| agpl-3.0 | Python | |
dda3ce9c56967dc6069b61f16feed2932e24ea14 | test = input ("CPF: ") cpf = test[:3] + "." + test[3:6] + "." + test[6:9] + "-" + test[9:] print(cpf) | bigown/SOpt,maniero/SOpt,maniero/SOpt,bigown/SOpt,maniero/SOpt,maniero/SOpt,maniero/SOpt,bigown/SOpt,maniero/SOpt,bigown/SOpt,maniero/SOpt,bigown/SOpt,bigown/SOpt,maniero/SOpt,maniero/SOpt,bigown/SOpt,bigown/SOpt,maniero/SOpt,bigown/SOpt,maniero/SOpt,bigown/SOpt,maniero/SOpt,maniero/SOpt,maniero/SOpt,maniero/SOpt,maniero/SOpt | Python/FormatCpf.py | Python/FormatCpf.py | test = input ("CPF: ")
cpf = test[:3] + "." + test[3:6] + "." + test[6:9] + "-" + test[9:]
print(cpf)
#https://pt.stackoverflow.com/q/237371/101
| mit | Python | |
f27241b5409ec00568efa1752d5eeb71516b16bd | Add cellular.py | joseph346/cellular | cellular.py | cellular.py | import random
class TotalisticCellularAutomaton:
def __init__(self):
self.n_cells = 200
self.n_states = 5
self.symbols = ' .oO0'
self.radius = 1
self.cells = [random.randrange(0, self.n_states) for _ in range(self.n_cells)]
n_rules = (2*self.radius + 1) * (self.n_states - 1)
self.rules = [0] + [random.randrange(0, self.n_states) for _ in range(n_rules)]
def neighbor_sum(self, pos):
return sum(self.cells[(pos+i)%self.n_cells] for i in range(-self.radius, self.radius+1))
def next_gen(self):
self.cells = [self.rules[self.neighbor_sum(i)] for i in range(self.n_cells)]
def print_gen(self):
print(''.join(self.symbols[state] for state in self.cells))
def main():
ca = TotalisticCellularAutomaton()
print(ca.rules)
while True:
ca.print_gen()
ca.next_gen()
if __name__ == '__main__':
main()
| unlicense | Python | |
03d5fb46c877d176ed710a8d27b5ad7af699dc52 | add Lubebbers example | pylayers/pylayers,pylayers/pylayers,dialounke/pylayers,dialounke/pylayers | pylayers/antprop/tests/Diffraction-Luebbers.py | pylayers/antprop/tests/Diffraction-Luebbers.py |
# coding: utf-8
# Luebbers diffraction example (exported from a Jupyter notebook).

# In[1]:

from pylayers.simul.link import *


# In[2]:

# Deterministic link on the Luebbers layout; 'tvi' selects which layout
# graphs get built.  NOTE(review): semantics of 'tvi' not shown here --
# confirm against the pylayers DLink documentation.
DL=DLink(L=Layout('Luebbers.ini'),graph='tvi')


# In[3]:

# get_ipython().magic(u'matplotlib inline')
# DL.L.showG('i')


# In[7]:

# Endpoint positions as (x, y, z) triples -- presumably metres -- and the
# evaluation frequencies in GHz.
DL.a = np.array(([37.5,6.2,2.]))
DL.b = np.array(([13,30,2.]))
DL.fGHz=np.array(([0.9,1.0]))


# In[8]:

plt.ion()


# In[9]:

# Evaluate the link including diffracted rays.
DL.eval(diffraction=True)


# In[10]:

# Bare expression: only meaningful interactively (displays the rays object).
DL.R


# In[ ]:
| mit | Python | |
1137a5ffa3481a224649dc2321b17fe227a7553d | Create glitch.py | Loreleix64/aradiabot | glitch.py | glitch.py | # Aradiabot image glitching functions.
# Transcribed over from my 'fastglitch' repository.
from io import BytesIO, StringIO
import random, sys, PIL.Image, PIL.ImageChops, PIL.ImageDraw, os
# Replacement characters written into the JPEG byte stream by
# RandomByteAddition when corrupting image data.
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'
import asyncio  # NOTE(review): unused in this module -- confirm before removing
def genImg(fname):
    """Run the full glitch pipeline over the image file *fname* and save
    the result as ``new<fname>.png``.

    Effects are chained in order: JPEG byte corruption, RGB channel
    offset, whole-image offset, rectangle artifacts, row slicing and
    pixel noise, each with a random strength.
    """
    # FIX: the handle returned by Image.open was never closed; convert()
    # yields an independent copy, so the source file can be released early.
    with PIL.Image.open(fname) as source:
        img = source.convert('RGBA')
    proto1 = RandomByteAddition(img, random.randint(1,16))
    proto2 = RGBOffset(proto1, random.randint(1,64))
    proto3 = PixelOffset(proto2, random.randint(1,512))
    proto4 = Artifact(proto3, random.randint(1,64))
    proto5 = RowSlice(proto4, random.randint(1,32))
    proto6 = Noise(proto5, random.randint(25000,50000))
    p = proto6.convert('RGB')
    p.save('new' + fname + '.png')
    # FIX: also close the converted input and the final RGB copy (both
    # leaked before).  RowSlice/Noise return their input object unchanged,
    # so proto4/proto5/proto6 may alias one image; Image.close() tolerates
    # repeated calls, as the original's triple close already relied on.
    p.close()
    img.close()
    proto1.close()
    proto2.close()
    proto3.close()
    proto4.close()
    proto5.close()
    proto6.close()
def RandomByteAddition(image, seed):
    """Glitch *image* by corrupting random bytes of its JPEG encoding.

    The image is re-encoded as JPEG in memory, then up to *seed* bytes
    beyond the first 1024 (skipped so the header survives) are replaced
    with random alphanumeric characters from ``chars``.  After every
    write the buffer is re-parsed; once it stops being a readable image,
    the last still-readable version is kept.  Returns a new PIL image
    decoded from the corrupted bytes.

    FIX: the original kept ``bytes2 = bytesobj`` as a "backup", but that
    is an alias of the same BytesIO object -- not a copy -- so the
    fallback re-read the corrupted data.  It also called Image.open()
    mid-stream without seeking back to 0, which made the validity check
    fail spuriously.
    """
    buf = BytesIO()
    image.save(buf, 'jpeg')
    data = bytearray(buf.getvalue())
    good = bytes(data)          # last known-good encoding
    pos = 1024                  # skip the JPEG header region
    for _ in range(seed):
        pos += random.randint(0, 32)
        if pos >= len(data):
            break               # don't grow the file past its end
        data[pos] = ord(random.choice(chars))
        pos += 1
        try:
            PIL.Image.open(BytesIO(bytes(data)))
        except Exception:
            break               # corruption went too far; keep `good`
        good = bytes(data)
    return PIL.Image.open(BytesIO(good))
def RGBOffset(image, distance):
    """Chromatic-aberration effect: slide the red channel left and the
    blue channel right by ``distance * 30`` pixels (wrapping), leaving
    green in place, and return the recombined RGB image."""
    shift = distance * 30
    red, green, blue = image.split()
    shifted_red = PIL.ImageChops.offset(red, -shift, 0)
    shifted_blue = PIL.ImageChops.offset(blue, shift, 0)
    return PIL.Image.merge('RGB', (shifted_red, green, shifted_blue))
def PixelOffset(image, distance):
    """Wrap-scroll the whole image by *distance* pixels along both axes
    and return the shifted copy."""
    return PIL.ImageChops.offset(image, distance)
def RowSlice(image, sliceamount):
    """Tear the image into up to *sliceamount* horizontal bands (16-128px
    tall) and shift each band sideways by a random wrapping amount, in
    place.  Returns the same image object."""
    top = 0
    for _ in range(sliceamount):
        bottom = top + random.randint(16, 128)
        if bottom > image.height:
            break
        box = (0, top, image.width, bottom)
        band = PIL.ImageChops.offset(image.crop(box), random.randint(-128, 128), 0)
        image.paste(band, box)
        band.close()
        top = bottom
    return image
def Artifact(image, screwamount):
    """Overlay up to *screwamount* random translucent rectangles, working
    left to right in 32-128px columns, and return the composited RGBA
    image."""
    rgba = image.convert('RGBA')
    overlay = PIL.Image.new('RGBA', rgba.size, (255, 255, 255, 0))
    draw = PIL.ImageDraw.Draw(overlay)
    left = 0
    for _ in range(screwamount):
        right = left + random.randint(32, 128)
        if right > image.width:
            break
        # Rectangle confined to this column; top somewhere in the upper
        # half, bottom anywhere below it.
        y_top = random.randint(0, image.height - int(round(image.height / 2.0, 0)))
        x_left = random.randint(left, right - 1)
        y_bottom = random.randint(y_top, image.height)
        draw.rectangle(
            (x_left, y_top, right, y_bottom),
            fill=(random.randint(0, 255),
                  random.randint(0, 255),
                  random.randint(0, 255),
                  random.randint(64, 200)))
        left = right
    return PIL.Image.alpha_composite(rgba, overlay)
def Noise(image, pixels):
    """Speckle *image* in place with randomly coloured single pixels and
    return the same image object.

    FIX: the loop ran ``range(1, pixels)`` and therefore drew only
    ``pixels - 1`` dots; it now draws exactly *pixels* dots.
    """
    for _ in range(pixels):
        # NOTE(review): coordinates and colour channels start at 1, so
        # row/column 0 and zero-valued channels never occur -- preserved
        # from the original; confirm whether that is intentional.
        x = random.randint(1, image.width - 1)
        y = random.randint(1, image.height - 1)
        image.putpixel((x, y), (random.randint(1, 255),
                                random.randint(1, 255),
                                random.randint(1, 255)))
    return image
| mit | Python | |
471d1d4ae197c7643eeac374a0353adbce54fd44 | add scheme to grabber api url if not present | gravyboat/streamlink,streamlink/streamlink,streamlink/streamlink,wlerin/streamlink,back-to/streamlink,mmetak/streamlink,chhe/streamlink,melmorabity/streamlink,bastimeyer/streamlink,gravyboat/streamlink,chhe/streamlink,wlerin/streamlink,beardypig/streamlink,melmorabity/streamlink,bastimeyer/streamlink,mmetak/streamlink,back-to/streamlink,javiercantero/streamlink,javiercantero/streamlink,beardypig/streamlink | src/streamlink/plugins/nineanime.py | src/streamlink/plugins/nineanime.py | import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
from streamlink.plugin.api import useragents
from streamlink.plugin.api import validate
from streamlink.stream import HTTPStream
from streamlink.compat import urlparse
class NineAnime(Plugin):
    """Streamlink plugin for 9anime.to episode pages.

    Streams are resolved in two steps: an AJAX "episode info" request
    that returns the URL of a "grabber" API plus its query parameters,
    then a request to that grabber, which lists the MP4 sources.
    """

    # Protocol-relative on purpose; add_scheme() completes it with the
    # scheme of the page URL being handled.
    _episode_info_url = "//9anime.to/ajax/episode/info"

    # Expected shape of the episode-info response.
    _info_schema = validate.Schema({
        "grabber": validate.url(),
        "params": {
            "id": validate.text,
            "token": validate.text,
            "options": validate.text,
        }
    })

    # Expected shape of the grabber response: a list of MP4 variants.
    _streams_schema = validate.Schema({
        "token": validate.text,
        "error": None,
        "data": [{
            "label": validate.text,
            "file": validate.url(),
            "type": "mp4"
        }]
    })

    # Captures (film id, episode id) from the watch-page URL.
    _url_re = re.compile(r"https?://9anime.to/watch/(?:[^.]+?\.)(\w+)/(\w+)")

    @classmethod
    def can_handle_url(cls, url):
        """Return True if *url* looks like a 9anime watch page."""
        return cls._url_re.match(url) is not None

    def add_scheme(self, url):
        # update the scheme for the grabber url if required
        if url.startswith("//"):
            url = "{0}:{1}".format(urlparse(self.url).scheme, url)
        return url

    def _get_streams(self):
        """Yield (quality label, HTTPStream) pairs for the episode."""
        match = self._url_re.match(self.url)
        film_id, episode_id = match.groups()

        headers = {
            "Referer": self.url,
            "User-Agent": useragents.FIREFOX
        }

        # Get the info about the Episode, including the Grabber API URL
        info_res = http.get(self.add_scheme(self._episode_info_url),
                            params=dict(update=0, film=film_id, id=episode_id),
                            headers=headers)
        info = http.json(info_res, schema=self._info_schema)

        # Get the data about the streams from the Grabber API
        grabber_url = self.add_scheme(info["grabber"])
        stream_list_res = http.get(grabber_url, params=info["params"], headers=headers)
        stream_data = http.json(stream_list_res, schema=self._streams_schema)

        for stream in stream_data["data"]:
            yield stream["label"], HTTPStream(self.session, stream["file"])


__plugin__ = NineAnime
| import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
from streamlink.plugin.api import validate
from streamlink.stream import HTTPStream
class NineAnime(Plugin):
    """Streamlink plugin for 9anime.to episode pages.

    Streams are resolved in two steps: an AJAX "episode info" request
    that returns the URL of a "grabber" API plus its query parameters,
    then a request to that grabber, which lists the MP4 sources.
    """

    _episode_info_url = "http://9anime.to/ajax/episode/info"

    # Expected shape of the episode-info response.
    _info_schema = validate.Schema({
        "grabber": validate.url(),
        "params": {
            "id": validate.text,
            "token": validate.text,
            "options": validate.text,
        }
    })

    # Expected shape of the grabber response: a list of MP4 variants.
    _streams_schema = validate.Schema({
        "token": validate.text,
        "error": None,
        "data": [{
            "label": validate.text,
            "file": validate.url(),
            "type": "mp4"
        }]
    })

    # Desktop Chrome user-agent sent with every request.
    _user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "\
                  "Chrome/36.0.1944.9 Safari/537.36"

    # Captures (film id, episode id) from the watch-page URL.
    _url_re = re.compile(r"https?://9anime.to/watch/(?:[^.]+?\.)(\w+)/(\w+)")

    @classmethod
    def can_handle_url(cls, url):
        """Return True if *url* looks like a 9anime watch page."""
        return cls._url_re.match(url) is not None

    def _get_streams(self):
        """Yield (quality label, HTTPStream) pairs for the episode."""
        match = self._url_re.match(self.url)
        film_id, episode_id = match.groups()

        headers = {
            "Referer": self.url,
            "User-Agent": self._user_agent
        }

        # Get the info about the Episode, including the Grabber API URL
        info_res = http.get(self._episode_info_url,
                            params=dict(update=0, film=film_id, id=episode_id),
                            headers=headers)
        info = http.json(info_res, schema=self._info_schema)

        # Get the data about the streams from the Grabber API
        stream_list_res = http.get(info["grabber"], params=info["params"], headers=headers)
        stream_data = http.json(stream_list_res, schema=self._streams_schema)

        for stream in stream_data["data"]:
            yield stream["label"], HTTPStream(self.session, stream["file"])


__plugin__ = NineAnime
| bsd-2-clause | Python |
566e3e9140ef96d58aaa4bfc0f89d9429a978485 | add a script to get connections between the minima | js850/nested_sampling,js850/nested_sampling | get_connections.py | get_connections.py | from lj_run import LJClusterNew
import sys
from pygmin.landscape import Graph
natoms = int(sys.argv[1])
dbname = sys.argv[2]
system = LJClusterNew(natoms)
db = system.create_database(dbname)
while True:
min1 = db.minima()[0]
graph = Graph(db)
all_connected = True
for m2 in db.minima()[1:]:
if not graph.areConnected(min1, m2):
all_connected = False
break
if all_connected:
print "minima are all connected, ending"
exit(1)
connect = system.get_double_ended_connect(min1, m2, db, fresh_connect=True, load_no_distances=True)
connect.connect()
| bsd-2-clause | Python | |
d362847f0eb895dd3661a636f94b2216b6497ec6 | Add tests for Model. | jhckragh/magetool | magetool/tests/commands/model_test.py | magetool/tests/commands/model_test.py | import os
import unittest
from magetool.commands.model import Model
from magetool.commands.module import Module
from magetool.tests.util import remove_module, TEST_DIR
reference_reg_config = """<?xml version="1.0"?>
<config>
<modules>
<Foo_Quux>
<version>0.1.0</version>
</Foo_Quux>
</modules>
<global>
<models>
<quux>
<class>Foo_Quux_Model</class>
</quux>
</models>
</global>
</config>
"""
reference_config = """<?xml version="1.0"?>
<config>
<modules>
<Foo_Quux>
<version>0.1.0</version>
</Foo_Quux>
</modules>
<global>
<models>
<quux>
<class>Foo_Quux_Model</class>
<resourceModel>quux_mysql4</resourceModel>
</quux>
<quux_mysql4>
<class>Foo_Quux_Model_Mysql4</class>
<entities>
<tag>
<table>quux_tag</table>
</tag>
</entities>
</quux_mysql4>
</models>
</global>
</config>
"""
reference_model = """<?php
class Foo_Quux_Model_Tag extends Mage_Core_Model_Abstract
{
protected function _construct()
{
$this->_init('quux/tag');
}
}
"""
reference_resource = """<?php
class Foo_Quux_Model_Mysql4_Tag extends Mage_Core_Model_Mysql4_Abstract
{
protected function _construct()
{
$this->_init('quux/tag', 'tag_id');
}
}
"""
class ModelTest(unittest.TestCase):
    """Functional tests for the `magetool` Model command.

    Every test runs inside a scratch module created under TEST_DIR:
    setUp() creates a module named Quux and chdirs into it, tearDown()
    removes it and restores the original working directory.
    """

    def setUp(self):
        # Create a fresh Foo_Quux module and run the test from inside it.
        self.old_cwd = os.getcwd()
        os.chdir(TEST_DIR)
        Module().create("Quux")
        os.chdir("Quux")
        self.model = Model()

    def tearDown(self):
        # Remove the scratch module and restore the saved cwd.
        os.chdir("..")
        remove_module("Foo", "Quux")
        os.chdir(self.old_cwd)
        del self.model

    def test_register(self):
        # register() should only declare the model class in config.xml.
        self.model.register()
        with open(os.path.join("etc", "config.xml")) as config:
            self.assertEqual(reference_reg_config, config.read())

    def test_create(self):
        # create("Tag") should register the model and resource model in
        # config.xml and write both generated PHP class files.
        self.model.create("Tag")
        with open(os.path.join("etc", "config.xml")) as config:
            self.assertEqual(reference_config, config.read())
        with open(os.path.join("Model", "Tag.php")) as model:
            self.assertEqual(reference_model, model.read())
        with open(os.path.join("Model", "Mysql4", "Tag.php")) as resource:
            self.assertEqual(reference_resource, resource.read())
        # Clean up generated files so tearDown can remove the module.
        os.remove(os.path.join("Model", "Mysql4", "Tag.php"))
        os.rmdir(os.path.join("Model", "Mysql4"))
        os.remove(os.path.join("Model", "Tag.php"))


if __name__ == "__main__":
    unittest.main()
| bsd-2-clause | Python | |
72b701652271178e08d9cccd088d24177d4a2fc6 | Add functions for storing/getting blogs and posts | jamalmoir/pyblogit | pyblogit/database_handler.py | pyblogit/database_handler.py | """
pyblogit.database_handler
~~~~~~~~~~~~~~~~~~~~~~~~~
This module handles the connection and manipulation of the local database.
"""
import sqlite3
def get_cursor(blog_id):
    """Open (creating if necessary) the sqlite database file named after
    *blog_id* and return a cursor on a fresh connection."""
    connection = sqlite3.connect(blog_id)
    return connection.cursor()
def add_blog(blog_id, blog_name):
    """Adds a new blog to the local blogs database and
    creates a new database for the blog."""
    # These two statements create the database files if
    # they don't exist.
    c = get_cursor('blogs')
    blog_c = get_cursor(blog_id)

    # FIX: the original tested bool(cursor), which is always True, so the
    # blogs table was never created; CREATE TABLE IF NOT EXISTS handles
    # both cases in one statement.
    c.execute('CREATE TABLE IF NOT EXISTS blogs(blog_id INT, blog_name TEXT)')

    # FIX: parameterized query instead of str.format -- the old statement
    # left blog_name unquoted (invalid SQL) and was open to SQL injection.
    c.execute('INSERT INTO blogs(blog_id, blog_name) VALUES (?, ?)',
              (blog_id, blog_name))
    # FIX: commit, otherwise the insert is lost when the connection dies.
    c.connection.commit()

    # Create table to store posts in new blog's database.
    blog_c.execute('CREATE TABLE IF NOT EXISTS posts(post_id INT, title TEXT, '
                   'url TEXT, status TEXT, content TEXT, updated INT)')
    blog_c.connection.commit()
def get_blogs():
    """Return a list of (blog_id, blog_name) rows for all stored blogs."""
    c = get_cursor('blogs')
    # FIX: the original returned the cursor produced by execute(); fetch
    # the rows so callers get a concrete, reusable list.
    c.execute('SELECT * FROM blogs')
    return c.fetchall()
def get_post(blog_id, post_id):
    """Return the post row with *post_id* from the blog's database, or
    None if no such post exists."""
    c = get_cursor(blog_id)
    # FIX: parameterized query instead of str.format (SQL injection).
    c.execute('SELECT * FROM posts WHERE post_id = ?', (post_id,))
    return c.fetchone()
def get_posts(blog_id, limit=None):
    """Retrieves all the posts from a local database, if a limit
    is specified, it will retrieve up to that amount of posts."""
    c = get_cursor(blog_id)
    if limit:
        # FIX: the original joined 'SELECT * FROM posts' and 'LIMIT n'
        # without a separating space, producing invalid SQL, and
        # interpolated the value directly; bind it as a parameter instead.
        c.execute('SELECT * FROM posts LIMIT ?', (int(limit),))
    else:
        c.execute('SELECT * FROM posts')
    return c.fetchall()
def update_post(blog_id, post_id, post):
    """Replace the stored post *post_id* with *post* (not implemented)."""
    # TODO: update post in local database
    pass
def add_post(blog_id, post):
    """Insert *post* into the blog's database (not implemented)."""
    # TODO: insert new post in local database
    pass
| mit | Python | |
726316b50209dfc5f6a8f6373cd7e3f53e267bb3 | Implement a genre string parser | 6/GeoDJ,6/GeoDJ | geodj/genre_parser.py | geodj/genre_parser.py | import re
from django.utils.encoding import smart_str
class GenreParser:
    """Maps a free-form genre string to one of a fixed set of buckets."""

    # Ordered (pattern, bucket) pairs; the first pattern that matches
    # wins, so more specific genres must appear before broader ones.
    _RULES = (
        (r"\b(jazz|blues)\b", "jazz"),
        (r"\b(ska|reggae|ragga|dub)\b", "ska"),
        (r"\b(r&b|funk|soul)\b", "r&b"),
        (r"\bfolk\b", "folk"),
        (r"\b(country|bluegrass)\b", "country"),
        (r"\b(rap|hip hop|crunk|trip hop)\b", "hiphop"),
        (r"\bpop\b", "pop"),
        (r"\b(rock|metal|punk)\b", "rock"),
        (r"\b(electronic|electronica|electro|house|techno|ambient|chiptune|industrial|downtempo|drum and bass|trance|dubstep)\b", "electronic"),
        (r"\b(classical|orchestra|opera|piano|violin|cello)\b", "classical"),
    )

    @staticmethod
    def parse(genre):
        """Return the canonical bucket for *genre*, or None if no rule
        matches."""
        genre = smart_str(genre).lower()
        for pattern, bucket in GenreParser._RULES:
            if re.search(pattern, genre):
                return bucket
        return None
| mit | Python | |
23402487a2b12aca391bb5958b4ba3e9424a6801 | Add a new management command 'olccperiodic' to update the 'on_sale' property for all products. | twaddington/django-olcc,twaddington/django-olcc,twaddington/django-olcc | django_olcc/olcc/management/commands/olccperiodic.py | django_olcc/olcc/management/commands/olccperiodic.py | import datetime
from django.core.management.base import BaseCommand
from django.db import IntegrityError, transaction
from olcc.models import Product, ProductPrice
from optparse import make_option
class Command(BaseCommand):
    """Periodic command that recomputes each Product's `on_sale` flag:
    a product is on sale when its price effective on the 1st of this
    month is lower than the price effective on the 1st of last month."""

    help = """\
A command to be run periodically to calculate Product status
from updated price data.
Currently this command iterates over all product records and
toggles the 'on_sale' property if the item's price has dropped
since last month."""

    option_list = BaseCommand.option_list + (
        make_option('--quiet', action='store_true', dest='quiet',
            default=False, help='Suppress all output except errors'),
    )

    def uprint(self, msg):
        """
        Unbuffered print.
        """
        if not self.quiet:
            self.stdout.write("%s\n" % msg)
            self.stdout.flush()

    @transaction.commit_on_success
    def handle(self, *args, **options):
        """Recompute on_sale for every product inside one transaction."""
        self.quiet = options.get('quiet', False)

        # Get today's date
        today = datetime.date.today()

        # Get the first of this month
        this_month = today.replace(day=1)

        # Get the first of last month
        try:
            last_month = today.replace(month=today.month-1, day=1)
        except ValueError:
            # month - 1 == 0 is invalid: roll over to December of the
            # previous year.
            if today.month == 1:
                last_month = today.replace(year=today.year-1, month=12, day=1)

        # Update the on sale flag for all products
        count = 0
        for p in Product.objects.all().order_by('title'):
            try:
                current_price = p.prices.get(effective_date=this_month)
                previous_price = p.prices.get(effective_date=last_month)
                if current_price.amount < previous_price.amount:
                    p.on_sale = True
                    self.uprint('[SALE]: %s' % p)
                    count += 1
                else:
                    p.on_sale = False

                # Persist our changes
                p.save()
            except ProductPrice.DoesNotExist:
                # Products without a price on either boundary date are
                # left untouched.
                pass

        self.uprint('\n%s items have dropped in price!' % count)
| mit | Python | |
860d81ec5f0b9ae4c28a1996773c06240c31b67a | Update names | zbwrnz/tkinter-practice | canvas.py | canvas.py | #!/usr/bin/env python3
from tkinter import *
from tkinter import ttk
import math
class App:
    """Tiny tkinter scribble app: drag with the left mouse button to
    draw segments whose colour alternates red/blue; the mouse wheel
    changes the stroke width.  Constructing the object builds the UI
    and blocks in the Tk main loop."""

    def __init__(self):
        self.lastx = 0
        self.lasty = 0
        self.fill = 'red'
        self.width = 2

        root = Tk()
        root.columnconfigure(0, weight=1)
        root.rowconfigure(0, weight=1)

        self.canvas = Canvas(root)
        self.canvas.grid(column=0, row=0, sticky=(N, W, E, S))
        self.canvas.bind("<Button-1>", self.xy)
        self.canvas.bind("<B1-Motion>", self.addLine)

        # with Windows: the wheel arrives as <MouseWheel> events carrying
        # a +/-120 delta per notch.
        self.canvas.bind("<MouseWheel>", self.changeWidth)
        # with Linux OS (X11): the wheel is reported as presses of mouse
        # buttons 4 (scroll up) and 5 (scroll down).
        self.canvas.bind("<Button-4>", self.changeWidth)
        self.canvas.bind("<Button-5>", self.changeWidth)

        root.mainloop()

    def xy(self, event):
        """Remember the press position as the start of the next segment."""
        self.lastx, self.lasty = event.x, event.y

    def changeWidth(self, event):
        """Adjust the stroke width from a mouse-wheel event.

        X11 reports scroll-down as button 5 and scroll-up as button 4;
        Windows reports a delta of -120 / +120 per notch instead.
        """
        # Scroll down: thinner, floored at 1.
        if event.num == 5 or event.delta == -120:
            self.width = max(1, self.width - 1)
        # Scroll up: thicker, capped at 500.
        if event.num == 4 or event.delta == 120:
            self.width = min(500, self.width + 1)

    def addLine(self, event):
        """Draw a segment from the last point to the cursor, alternating
        the colour between red and blue for each segment."""
        if self.fill == 'red':
            self.fill = 'blue'
        else:
            self.fill = 'red'
        self.canvas.create_line(
            self.lastx, self.lasty, event.x, event.y,
            fill=self.fill, width=math.floor(self.width))
        self.lastx, self.lasty = event.x, event.y
# Instantiate the app; the constructor enters the Tk main loop and blocks.
App()
| unlicense | Python | |
d5c7d429be93a2b2de4a1c09bd73f72c02664499 | Move win32 audio experiment to trunk. | adamlwgriffiths/Pyglet,adamlwgriffiths/Pyglet,seeminglee/pyglet64,niklaskorz/pyglet,niklaskorz/pyglet,niklaskorz/pyglet,adamlwgriffiths/Pyglet,seeminglee/pyglet64,adamlwgriffiths/Pyglet,seeminglee/pyglet64,niklaskorz/pyglet | experimental/directshow.py | experimental/directshow.py | #!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]

# Load the DirectShow type libraries (editing services and core control)
# through comtypes.
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #

# CLSID of the standard DirectShow filter graph manager.
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
    interface=qedit.IFilterGraph)

# RenderFile lets DirectShow assemble the whole decode/playback chain
# for the given media file.
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)

media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()

try:
    # Look at IMediaEvent interface for EOS notification
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    pass

# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
| bsd-3-clause | Python | |
285c852bb246042a4f882ab9ca2948e4f0241dac | add GTC.meshgrid Core | shmilee/gdpy3,shmilee/gdpy3,shmilee/gdpy3,shmilee/gdpy3 | src/processors/GTC/meshgrid.py | src/processors/GTC/meshgrid.py | # -*- coding: utf-8 -*-
# Copyright (c) 2018 shmilee
'''
Source fortran code:
v110922
-------
diagnosis.F90, subroutine diagnosis:37-50
!!diagnosis xy
if(mype==1)then
open(341,file='meshgrid.out',status='replace')
do i=0,mpsi
write(341,*)psimesh(i)
write(341,*)sprpsi(psimesh(i))
write(341,*)qmesh(i)
write(341,*)kapatmti(i)
write(341,*)kapatmte(i)
write(341,*)kapatmni(i)
write(341,*)kapatmne(i)
enddo
close(341)
endif
'''
import numpy
from ..basecore import BaseCore, log
__all__ = ['MeshgridCoreV110922']
class MeshgridCoreV110922(BaseCore):
    '''
    Meshgrid data
    1) psimesh, sprpsi, qmesh, kapatmti, kapatmte, kapatmni, kapatmne
       Shape of the array data is (mpsi+1,).
    '''
    __slots__ = []
    instructions = ['dig']
    filepatterns = ['^(?P<group>meshgrid)\.out$',
                    '.*/(?P<group>meshgrid)\.out$']
    grouppattern = '^meshgrid$'
    # The seven quantities written per radial grid point, in file order.
    _datakeys = (
        'psimesh', 'sprpsi', 'qmesh',
        'kapatmti', 'kapatmte', 'kapatmni', 'kapatmne')

    def _dig(self):
        '''Read 'meshgrid.out'.'''
        with self.rawloader.get(self.file) as f:
            log.ddebug("Read file '%s'." % self.file)
            outdata = f.readlines()
        sd = {}
        # The file holds 7 consecutive values per radial index i, so the
        # flat data reshapes to (7, npoints) with Fortran (column-major)
        # order: flat index 7*i + q maps to row q, column i.
        shape = (7, len(outdata) // 7)
        # FIX: warn *before* truncating -- the original truncated first,
        # which made len(outdata) % 7 == 0 always true and the warning
        # unreachable.
        if len(outdata) % 7 != 0:
            log.warn("Missing some raw data in '%s'! Guess the shape '%s'."
                     % (self.file, shape))
        outdata = outdata[:len(outdata) // 7 * 7]
        log.debug("Filling datakeys: %s ..." % str(self._datakeys[:]))
        outdata = numpy.array([float(n.strip()) for n in outdata])
        outdata = outdata.reshape(shape, order='F')
        for i, key in enumerate(self._datakeys):
            sd.update({key: outdata[i]})
        return sd
| mit | Python | |
e731bfdabbf42b636b02e93ccd3b67c55a28d213 | add unit test | kathryncrouch/Axelrod,bootandy/Axelrod,bootandy/Axelrod,risicle/Axelrod,uglyfruitcake/Axelrod,drvinceknight/Axelrod,uglyfruitcake/Axelrod,kathryncrouch/Axelrod,risicle/Axelrod,emmagordon/Axelrod,mojones/Axelrod,emmagordon/Axelrod,mojones/Axelrod | axelrod/tests/test_appeaser.py | axelrod/tests/test_appeaser.py | """
Test for the appeaser strategy
"""
import unittest
import axelrod
class TestAppeaser(unittest.TestCase):
    """Unit tests for the Appeaser strategy."""

    def test_strategy(self):
        """Appeaser cooperates until the opponent's play warrants a 'D'."""
        P1 = axelrod.Appeaser()
        P2 = axelrod.Player()
        # NOTE(review): assigns an apparently unused `str` attribute (with
        # a stray trailing semicolon) -- possibly meant to seed the first
        # move; confirm against the Appeaser implementation.
        P1.str = 'C';
        self.assertEqual(P1.strategy(P2), 'C')
        P1.history = ['C']
        # NOTE(review): duplicate assignment to P1.history -- the second
        # line was probably intended to set P2.history; confirm.
        P1.history = ['C']
        self.assertEqual(P1.strategy(P2), 'C')
        # The test expects 'D' once the opponent's latest move was 'D'.
        P1.history = ['C', 'D', 'C']
        P2.history = ['C', 'C', 'D']
        self.assertEqual(P1.strategy(P2), 'D')

    def test_representation(self):
        """str() of the player should be its canonical strategy name."""
        P1 = axelrod.Appeaser()
        self.assertEqual(str(P1), 'Appeaser')
| mit | Python | |
0a0d31077746e69bf5acc7d90fa388e121544339 | Add skeleton for new python scripts. | lweasel/misc_bioinf,lweasel/misc_bioinf,lweasel/misc_bioinf | script_skeleton.py | script_skeleton.py | #!/usr/bin/python
"""Usage:
<SCRIPT_NAME> [--log-level=<log-level>]
-h --help
Show this message.
-v --version
Show version.
--log-level=<log-level>
Set logging level (one of {log_level_vals}) [default: info].
"""
import docopt
import ordutils.log as log
import ordutils.options as opt
import schema
import sys
# docopt option key and a printable list of the accepted log levels.
LOG_LEVEL = "--log-level"
LOG_LEVEL_VALS = str(log.LEVELS.keys())
def validate_command_line_options(options):
    """Check the parsed docopt options; exit with the schema error code
    if the requested log level is not one of the recognised values."""
    # Validate command-line options
    try:
        opt.validate_dict_option(
            options[LOG_LEVEL], log.LEVELS, "Invalid log level")
    except schema.SchemaError as exc:
        # NOTE(review): relies on the `exit` builtin injected by the site
        # module; sys.exit would be more robust -- confirm before changing.
        exit(exc.code)
def main(docstring):
    """Entry point: parse *docstring* with docopt, validate the options
    and set up logging on stderr."""
    # Read in and validate command line options
    options = docopt.docopt(docstring, version="<SCRIPT_NAME> v0.1")
    validate_command_line_options(options)

    # Set up logger
    logger = log.getLogger(sys.stderr, options[LOG_LEVEL])

    # Rest of script...
if __name__ == "__main__":
    # Pass the module docstring so docopt can print usage/help text.
    main(__doc__)
| mit | Python | |
63b954c952dda9d123e6fa1e348babae97523e21 | Create securitygroup.py | pathakvaidehi2391/WorkSpace,pathakvaidehi2391/WorkSpace | azurecloudify/securitygroup.py | azurecloudify/securitygroup.py | apache-2.0 | Python | ||
01f7ef27825baf76b3dd9afaa2f4c12e05272d9d | Add Commodity Futures Trading Commission. | lukerosiak/inspectors-general,divergentdave/inspectors-general | inspectors/cftc.py | inspectors/cftc.py | #!/usr/bin/env python
import datetime
import logging
import os
import re
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# http://www.cftc.gov/About/OfficeoftheInspectorGeneral/index.htm
# Oldest report: 2000
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
# - Add published dates for all reports in REPORT_PUBLISHED_MAPPING
# Reports whose pages don't state a publication date; dates curated by hand.
REPORT_PUBLISHED_MAPPING = {
  "oig_auditreportp05": datetime.datetime(2014, 7, 17),
  "oigocoaudit2014": datetime.datetime(2014, 5, 1),
  "oigcommentletter042214": datetime.datetime(2014, 4, 22),
}

# Landing page listing all CFTC inspector-general reports.
REPORTS_URL = "http://www.cftc.gov/About/OfficeoftheInspectorGeneral/index.htm"
def run(options):
    """Scrape the CFTC IG reports page and save every report that falls
    inside the requested year range."""
    year_range = inspector.year_range(options)

    # Pull the reports
    doc = BeautifulSoup(utils.download(REPORTS_URL))
    results = doc.select("ul.text > ul > li")
    for result in results:
        report = report_from(result, year_range)
        if report:
            inspector.save_report(report)

    # Pull the semiannual reports
    results = doc.select("ul.text td a")
    for result in results:
        report = report_from(result, year_range)
        if report:
            inspector.save_report(report)
def report_from(result, year_range):
    """Build a report dict from one scraped list item / link, or return
    None when the report's year falls outside *year_range*.

    The published date is resolved in this order: the hand-curated
    REPORT_PUBLISHED_MAPPING, a 'Month DD, YYYY' date in the link title,
    the same pattern in the text following the link, and finally a bare
    year in the title (mapped to Nov 1st of that year).
    """
    if result.name == 'a':
        link = result
    else:
        link = result.select("a")[-1]

    report_url = urljoin(REPORTS_URL, link.get('href'))
    report_filename = report_url.split("/")[-1]
    report_id, _ = os.path.splitext(report_filename)
    title = link.text

    if report_id in REPORT_PUBLISHED_MAPPING:
        published_on = REPORT_PUBLISHED_MAPPING[report_id]
    else:
        try:
            published_on_text = "/".join(re.search("(\w+) (\d+), (\d+)", title).groups())
            published_on = datetime.datetime.strptime(published_on_text, '%B/%d/%Y')
        except AttributeError:
            # re.search returned None (no date in the title); try the
            # text right after the link instead.
            try:
                published_on_text = "/".join(re.search("(\w+) (\d+), (\d+)", str(link.next_sibling)).groups())
                published_on = datetime.datetime.strptime(published_on_text, '%B/%d/%Y')
            except AttributeError:
                # For reports where we can only find the year, set them to Nov 1st of that year
                published_on_year = int(re.search('(\d+)', title).groups()[0])
                published_on = datetime.datetime(published_on_year, 11, 1)

    if published_on.year not in year_range:
        logging.debug("[%s] Skipping, not in requested range." % report_url)
        return

    report = {
        'inspector': 'cftc',
        'inspector_url': 'http://www.cftc.gov/About/OfficeoftheInspectorGeneral/index.htm',
        'agency': 'cftc',
        'agency_name': 'Commodity Futures Trading Commission',
        'file_type': 'pdf',
        'report_id': report_id,
        'url': report_url,
        'title': title,
        'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
    }
    return report
# Script entry point: utils.run wires up CLI options and calls run(options).
utils.run(run) if (__name__ == "__main__") else None
| cc0-1.0 | Python | |
24c6ede2c7950e36516f0611811ff922d7a5b86f | Create grayl_g-throughput.py | dschutterop/Graphite-graylog | grayl_g-throughput.py | grayl_g-throughput.py | #!/usr/bin/python
#
# == Synopsis
#
# Script to get Graylog throughput data pushed
# to Graphite
#
#
# === Workflow
# This script grabs JSON from your Graylog
# cluster, transforms data into
# valid Carbon metrics and delivers it into carbon
#
# Carbon only needs three things:
# <metric> <value> <timestamp>
#
# So what we do is grab the Graylog
# key as the metric, grab the Graylog value
# as the value and make up our own timestamp. Well,
# actually, that's done through time()
#
# Author : D. Schutterop
# Email : daniel@schutterop.nl
# Version : v0.1
#
# === HowTo
#
# Create a (read only!) user in Graylog, I've defaulted it
# to user 'graphite' with password 'graphite'
#
# Please replace the value of grayHost with the hostnames
# or IP address of (one of) your Graylog hosts
#
# Replace the value of grpHost (and port) of the Carbon
# server and, if you want, change the grpDatabase to
# something that makes sense to you.
#
# Feel free to change the runInterval with a value you feel
# comfortable with.
#
# Fire the script and see the data appear in Graphite
# (Creation of the database files may take some time...
#
#
import json,requests,time,socket,os,sys
runInterval = 15  # seconds between polling rounds

# Graylog REST API nodes and credentials.
# NOTE(review): credentials are hard-coded; consider moving them to the
# environment or a config file.
grayHost = [ 'graylognode1.localdomain', 'graylognode2.localdomain', 'graylognode3.localdomain' ]
grayPort = 12900
grayUser = 'graphite' # Read only user!
grayPass = 'graphite'

# Carbon (Graphite) endpoint and the top-level metric namespace.
grpHost = 'graphitehost.localdomain'
grpPort = 2003
grpDatabase = 'graylog'

# Suppress the InsecureRequestWarning triggered by the verify=False
# requests made in grayGetData().
requests.packages.urllib3.disable_warnings()
def grayGetData(grayHost, grayPort):
    """Fetch the /system/throughput JSON document from one Graylog node
    and return it as a dict.

    The parameters shadow the module-level grayHost/grayPort on purpose;
    authentication uses the module-level grayUser/grayPass.
    """
    endpoint = "http://%s:%s/system/throughput" % (grayHost, grayPort)
    request_headers = {'Content-type': 'application/json'}
    # verify=False pairs with the disable_warnings() call at module top.
    response = requests.get(endpoint, verify=False, auth=(grayUser, grayPass), headers=request_headers)
    return json.loads(response.text)
def grpPutMessage(grpMetricKey, grpMetricValue):
    """Format one Carbon plaintext-protocol line:
    '<grpDatabase>.<key> <value> <unix-timestamp>'.
    """
    # FIX: dropped the unused metricAppend/metricPrepend/metricValue
    # temporaries from the original.
    metricKey = "%s.%s" % (grpDatabase, grpMetricKey)
    metricTime = int(time.time())
    return "%s %s %s" % (metricKey, grpMetricValue, metricTime)
def run(runInterval):
    """Forever: poll every Graylog node, turn its throughput values into
    Carbon lines, push them to Graphite over a fresh socket, then sleep
    for *runInterval* seconds."""
    while True:
        grpSocket = socket.socket()
        grpSocket.connect((grpHost,grpPort))
        message = ' '
        for node in grayHost:
            grayData = grayGetData(node,grayPort)
            for listItem in grayData:
                # Use only the node's short hostname as the metric prefix;
                # each new line is prepended onto the running message.
                nodeMetric = node.split('.',1)
                message = "\n %s %s" % (grpPutMessage("%s.%s" % (nodeMetric[0],listItem),grayData[listItem]),message)
        message = "%s \n" % (message)
        grpSocket.sendall(message)
        grpSocket.close()
        time.sleep(runInterval)
if __name__ == "__main__":
    # Daemonize: fork and let the parent exit so the poller keeps running
    # in the background; only the child reaches the lines below.
    procPid = os.fork()
    if procPid != 0:
        sys.exit(0)
    print ("Running %s every %s seconds in the background." % (__file__,runInterval))
    run(runInterval)
| apache-2.0 | Python | |
ac78f3f774dbfda4e2c96786ddebf74066a56f54 | add mtbf_job_runner | ShakoHo/mtbf_operation,Mozilla-TWQA/mtbf_operation,ypwalter/mtbf_operation,zapion/mtbf_operation | mtbf_job_runner.py | mtbf_job_runner.py | #!/usr/bin/env python
import combo_runner.action_decorator
from combo_runner.base_action_runner import BaseActionRunner
from utils.zip_utils import modify_zipfile
import os
class MtbfJobRunner(BaseActionRunner):
    """Skeleton MTBF job runner exposing pre-flash / flash / post-flash
    hooks; all bodies are placeholders."""
    action = combo_runner.action_decorator.action

    def pre_flash(self):
        # Hook run before flashing; not implemented yet.
        pass

    def flash(self):
        # Hook performing the actual flash; not implemented yet.
        pass

    def post_flash(self):
        # Hook run after flashing; not implemented yet.
        pass

    # @action
    def add_7mobile_action(self, action=False):
        # require import gaia_data_layer to call setSettings
        # FIX: the body contained only a comment, which is a SyntaxError;
        # `pass` makes the placeholder valid.
        pass
if __name__ == '__main__':
    # Manual smoke test: run the (placeholder) 7mobile action directly.
    MtbfJobRunner().add_7mobile_action()
| mpl-2.0 | Python | |
f5bb497960f9f9256cc9794baf0c53c4ba5d734f | Add Spider class for web crawling. | SaltusVita/ReoGrab | Spider.py | Spider.py | '''
Created on 7/07/2016
@author: garet
'''
class Spider():
    """Placeholder for the web-crawling spider; no behaviour implemented yet."""
    def __init__(self):
pass | bsd-3-clause | Python | |
8c8d28e95cf99f8aff4ba45819b08995ef63ea44 | add hubble | ashumeow/random-space-images | hubble.py | hubble.py | import urllib
import os
def fetchImages(start, stop):
    """Download sequentially numbered .jpg images starting at *start*.

    NOTE(review): the URL base passed to urlretrieve is an empty string,
    so the request target is just '<index>.jpg' -- the host prefix
    appears to have been lost; restore it before relying on this.
    NOTE(review): the range runs start..start+stop inclusive (stop+1
    iterations) while the progress message says "of stop" -- confirm
    which count is intended.
    """
    counter = 0
    imgIndex = start
    for i in range(start, start+stop+1):
        urllib.urlretrieve(""+str(imgIndex)+".jpg", str(imgIndex)+".jpg")
        print("Image# "+str(counter)+" of "+str(stop)+" captured.")
        counter += 1
        imgIndex += 1
    print("Finished")
def pathFolder():
    """Make ./space/hubble/ the working directory, creating the folder
    first if it does not exist yet."""
    target = os.getcwd() + "/space/hubble/"
    if not os.path.exists(target):
        print("Wait.. Folder not found!\n")
        print("Creating a new folder...\n")
        os.makedirs(target)
    os.chdir(target)
def main():
    """Prompt for a start index and a count, then download that many
    images into ./space/hubble/."""
    print("Random Images Capture from Hubble Telescope\n")
    start = raw_input("Pick a random number?:\n")
    stop = raw_input("How many images do you want to download?: ")
    print("Capturing images...\n")
    pathFolder()
    fetchImages(int(start), int(stop))

# Runs immediately on import/execution (no __main__ guard in the original).
main()
| mit | Python | |
62296474a389f684dbc1b66fb5256d494111b7c9 | Add a script to reproduce ezio issue #4 | kingsamchen/Eureka,kingsamchen/Eureka,kingsamchen/Eureka,kingsamchen/Eureka,kingsamchen/Eureka,kingsamchen/Eureka,kingsamchen/Eureka,kingsamchen/Eureka | SocketABC/ezio_issue4_reproduce.py | SocketABC/ezio_issue4_reproduce.py | # -*- coding: utf-8 -*-
import hashlib
import socket
import struct
SERVER_NAME = 'localhost'
SERVER_PORT = 9876
def main():
    """Reproduce ezio issue #4: send a length-prefixed 1600-byte payload
    and compare the server's echoed MD5 digest with one computed locally."""
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect((SERVER_NAME, SERVER_PORT))

    msg_len = 1600
    payload = 'a' * msg_len
    # Wire format: 8-byte big-endian length header, then the payload.
    msg = struct.pack('!Q', msg_len) + payload.encode('utf-8')
    client_socket.sendall(msg)
    print('data sent')

    # NOTE(review): a single recv() may return fewer bytes than the full
    # digest; acceptable for a repro script, not for production use.
    pin = client_socket.recv(256)
    print(len(pin))
    print('received hash: \t' + pin.decode('utf-8'))

    md5 = hashlib.md5()
    md5.update(payload.encode('utf-8'))
    print('local hash: \t' + md5.hexdigest())


if __name__ == '__main__':
    main()
| mit | Python | |
6d93e603cd45544e296b8cd90853377688af6376 | Add median/LoG filter fcn | mfaytak/image-pca | imgphon/ultrasound.py | imgphon/ultrasound.py | import numpy as np
from scipy.ndimage import median_filter
from scipy.ndimage.filters import gaussian_laplace
def clean_frame(frame, median_radius=5, log_sigma=4):
    """
    Input: ndarray image, filter kernel settings
    Output: cleaned ndarray image

    A median filter removes speckle noise, then a Laplacian of
    Gaussian (LoG) mask is added back to sharpen edges.
    """
    # TODO scale input image in range (0,1)
    # TODO provide default for median_radius that is
    # sensitive to image dimensions
    # Work in int64 so the sum below cannot overflow while filtering.
    denoised = median_filter(frame.astype(np.int64), median_radius)
    edge_mask = gaussian_laplace(denoised, log_sigma)
    # Cast back to uint8; out-of-range values wrap per numpy astype
    # semantics, matching the original behaviour.
    cleaned = (denoised + edge_mask).astype(np.uint8)
return cleaned | bsd-2-clause | Python | |
ca3add180e8dc124e9ebec35682215a6de0ae9b1 | Add test_poly_divide script. | pearu/sympycore,pearu/sympycore | research/test_poly_divide.py | research/test_poly_divide.py |
# use time() instead on unix
import sys
# NOTE(review): time.clock() was removed in Python 3.8; on modern Python
# use time.perf_counter() on every platform instead.
if sys.platform=='win32':
    from time import clock
else:
    from time import time as clock
from sympycore import profile_expr
def time1(n=500):
import sympycore as sympy
w = sympy.Fraction(3,4)
x = sympy.polynomials.poly([0, 1, 1])
a = (x-1)*(x-2)*(x-3)*(x-4)*(x-5)
b = (x-1)*(x-2)*(x-w)
t1 = clock()
while n:
divmod(a, b)
n -= 1
t2 = clock()
return 100 / (t2-t1)
def time2(n=500):
import sympycore as sympy
w = sympy.Fraction(3,4)
x = sympy.polynomials.PolynomialRing[1]([0, 1, 1])
a = (x-1)*(x-2)*(x-3)*(x-4)*(x-5)
b = (x-1)*(x-2)*(x-w)
t1 = clock()
while n:
divmod(a, b)
n -= 1
t2 = clock()
return 100 / (t2-t1)
def timing():
t1 = time1()
t2 = time2()
return t1, t2, t1/t2
print timing()
# Print the (poly_rate, ring_rate, ratio) triple a few times to expose
# run-to-run jitter, then profile the PolynomialRing implementation.
print timing()
print timing()
print timing()

profile_expr('time2(50)')
8adbbc365042d49c1304610b3425e0974b1c6451 | Switch a little of the html generation to jinja2 | cowlicks/blaze,ChinaQuants/blaze,cpcloud/blaze,jcrist/blaze,AbhiAgarwal/blaze,cowlicks/blaze,LiaoPan/blaze,mwiebe/blaze,nkhuyu/blaze,FrancescAlted/blaze,mwiebe/blaze,dwillmer/blaze,FrancescAlted/blaze,LiaoPan/blaze,cpcloud/blaze,mwiebe/blaze,jdmcbr/blaze,maxalbert/blaze,FrancescAlted/blaze,mrocklin/blaze,aterrel/blaze,scls19fr/blaze,nkhuyu/blaze,ChinaQuants/blaze,markflorisson/blaze-core,mwiebe/blaze,caseyclements/blaze,alexmojaki/blaze,jdmcbr/blaze,aterrel/blaze,FrancescAlted/blaze,mrocklin/blaze,dwillmer/blaze,xlhtc007/blaze,alexmojaki/blaze,AbhiAgarwal/blaze,markflorisson/blaze-core,xlhtc007/blaze,markflorisson/blaze-core,ContinuumIO/blaze,maxalbert/blaze,jcrist/blaze,markflorisson/blaze-core,AbhiAgarwal/blaze,AbhiAgarwal/blaze,ContinuumIO/blaze,scls19fr/blaze,aterrel/blaze,caseyclements/blaze | blaze/server/datashape_html.py | blaze/server/datashape_html.py | from ..datashape import DataShape, Record, Fixed, Var, CType, String, JSON
#from blaze_server_config import jinja_env
from jinja2 import Template
# Inline "# JSON" hyperlink appended after a leaf field; the trailing
# newline inside the template literal is part of the rendered output.
json_comment_templ = Template("""<font style="font-size:x-small"> # <a href="{{base_url}}?r=data.json">JSON</a></font>
""")

# Outer <pre> wrapper for a rendered datashape; {{ds_html}} is produced
# by render_datashape_recursive().
datashape_outer_templ = Template("""
<pre>
type <a href="{{base_url}}?r=datashape">BlazeDataShape</a> = {{ds_html}}
</pre>
""")
def render_datashape_recursive(base_url, ds, indent):
    """Render one datashape node (and, recursively, its children) as HTML.

    Dimensions become a comma-separated prefix, records an indented {...}
    body whose fields link to their own sub-URLs, and scalar types render
    via str().  Raises TypeError for anything unrenderable.
    """
    if isinstance(ds, DataShape):
        pieces = []
        for dim in ds[:-1]:
            if isinstance(dim, Fixed):
                pieces.append('%d, ' % dim)
            elif isinstance(dim, Var):
                pieces.append('var, ')
            else:
                raise TypeError('Cannot render datashape with dimension %r' % dim)
        # The last element is rendered by recursing on it.
        pieces.append(render_datashape_recursive(base_url, ds[-1], indent))
        return ''.join(pieces)

    if isinstance(ds, Record):
        pieces = ['{' + json_comment_templ.render(base_url=base_url)]
        inner_indent = indent + '  '
        for fname, ftype in zip(ds.names, ds.types):
            child_url = base_url + '.' + fname
            rendered_child = render_datashape_recursive(child_url, ftype,
                                                        inner_indent)
            pieces.append(inner_indent +
                          '<a href="' + child_url + '">' + str(fname) + '</a>'
                          ': ' + rendered_child + ';')
            # Nested records already end with '}', so just break the line;
            # leaf fields instead get an inline JSON link (ends in '\n').
            if isinstance(ftype, Record):
                pieces.append('\n')
            else:
                pieces.append(json_comment_templ.render(base_url=child_url))
        pieces.append(indent + '}')
        return ''.join(pieces)

    if isinstance(ds, (CType, String, JSON)):
        return str(ds)

    raise TypeError('Cannot render datashape %r' % ds)
def render_datashape(base_url, ds):
    """Render a complete datashape page body: the <pre> wrapper around the
    recursively rendered datashape rooted at base_url."""
    return datashape_outer_templ.render(
        base_url=base_url,
        ds_html=render_datashape_recursive(base_url, ds, ''))
| from ..datashape import DataShape, Record, Fixed, Var, CType, String, JSON
#from blaze_server_config import jinja_env
#from jinja2 import Template
def json_comment(array_url):
    """Build the small inline '# JSON' hyperlink for *array_url*."""
    return ('<font style="font-size:x-small"> # '
            '<a href="%s?r=data.json">JSON</a></font>\n' % array_url)
def render_datashape_recursive(base_url, ds, indent):
    """Render one datashape node (and, recursively, its children) as HTML.

    base_url: URL of the array/field this node describes; record fields
        recurse with ``base_url + '.' + fieldname``.
    ds: the node to render — a DataShape, a Record, or a scalar type.
    indent: whitespace prefix used to indent nested record fields.

    Raises TypeError on dimensions or types it cannot render.
    """
    result = ''

    if isinstance(ds, DataShape):
        # Emit the dimension prefix ("3, var, ...") then recurse on the
        # last element of the datashape.
        for dim in ds[:-1]:
            if isinstance(dim, Fixed):
                result += ('%d, ' % dim)
            elif isinstance(dim, Var):
                result += 'var, '
            else:
                raise TypeError('Cannot render datashape with dimension %r' % dim)
        result += render_datashape_recursive(base_url, ds[-1], indent)
    elif isinstance(ds, Record):
        result += '{' + json_comment(base_url)
        for fname, ftype in zip(ds.names, ds.types):
            # Each field links to its own sub-URL and is rendered one
            # indent level deeper.
            child_url = base_url + '.' + fname
            child_result = render_datashape_recursive(child_url,
                            ftype, indent + '  ')
            result += (indent + '  ' +
                '<a href="' + child_url + '">' + str(fname) + '</a>'
                ': ' + child_result + ';')
            # Nested records already close with '}', so only a newline is
            # needed; leaf fields get an inline JSON link (ends in '\n').
            if isinstance(ftype, Record):
                result += '\n'
            else:
                result += json_comment(child_url)
        result += (indent + '}')
    elif isinstance(ds, (CType, String, JSON)):
        result += str(ds)
    else:
        raise TypeError('Cannot render datashape %r' % ds)
    return result
def render_datashape(base_url, ds):
    """Render a complete datashape as an HTML <pre> block whose root name
    links back to base_url's datashape view."""
    # Removed a leftover debug print of base_url that polluted stdout on
    # every render.
    result = render_datashape_recursive(base_url, ds, '')
    result = '<pre>\ntype <a href="' + base_url + \
        '?r=datashape">BlazeDataShape</a> = ' + result + '\n</pre>'
    return result
| bsd-3-clause | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.