commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
04664a035c2d67236ae9befe83110190d17b194c | Add a couple of TODOs | Rosuav/shed,Rosuav/shed,Rosuav/shed,Rosuav/shed,Rosuav/shed | updates.py | updates.py | #!/usr/bin/python3
# requires system Python and the python3-apt package
from collections import OrderedDict # Starting with Python 3.7, we could just use vanilla dicts
import apt # ImportError? apt install python3-apt
def describe(pkg):
# Python 3.7 equivalent:
# return {"Name": pkg.name, "Installed": pkg.installed.version, "Candidate": pkg.candidate.version}
return OrderedDict((("Name", pkg.name), ("Current", pkg.installed.version), ("Target", pkg.candidate.version)))
def show_packages(scr, upgrades, auto):
def print(s="", *args):
scr.addstr(str(s) + "\n", *args)
desc = [describe(pkg) for pkg in upgrades]
widths = OrderedDict((x, len(x)) for x in desc[0]) # Start with header widths
for d in desc:
for col in d:
widths[col] = max(widths[col], len(d[col]))
fmt = "[ ] " + " ".join("%%-%ds" % col for col in widths.values())
print(fmt % tuple(widths), curses.A_BOLD)
print("--- " + " ".join("-" * col for col in widths.values()))
# TODO: Cope with more packages than lines on the screen (scroll? paginate?)
for d in desc:
print(fmt % tuple(d.values()))
print()
if auto: print("Plus %d auto-installed packages." % auto)
print("Select packages to upgrade, then Enter to apply.")
print("Press I for more info on a package [TODO]")
pkg = 0
install = [False] * len(upgrades)
while True:
scr.move(pkg + 2, 1)
key = scr.getkey()
if key == "Q" or key == "q": return []
if key == "\n": break
if key == "KEY_UP": pkg = (pkg - 1) % len(upgrades)
if key == "KEY_DOWN": pkg = (pkg + 1) % len(upgrades)
if key == " ":
install[pkg] = not install[pkg]
scr.addstr(pkg + 2, 1, "X" if install[pkg] else " ")
if key == "I" or key == "i":
# TODO: Show a new window with package info
# Show the from and to versions, optionally the changelog,
# and ideally, the list of other packages that would be
# upgraded along with this one (its out-of-date deps).
pass
# scr.addstr(len(upgrades) + 7, 0, repr(key))
return [pkg for pkg, keep in zip(upgrades, install) if keep]
def main():
cache = apt.Cache()
cache.open()
upgrades = []
auto = 0
for pkg in cache:
if not pkg.is_installed: continue # This is checking upgrades only
if pkg.candidate == pkg.installed: continue # Already up-to-date
if pkg.is_auto_installed:
# Ignore (but summarize) autoinstalled packages
auto += 1
continue
upgrades.append(pkg)
if not upgrades:
print("Everything up-to-date.")
return
global curses; import curses
upgrades = curses.wrapper(show_packages, upgrades, auto)
if not upgrades: return
for pkg in upgrades:
pkg.mark_upgrade()
# TODO: Show progress while it downloads? Not sure why the default progress
# isn't being shown. Might need to subclass apt.progress.text.AcquireProgress?
cache.commit()
if __name__ == "__main__":
main()
| #!/usr/bin/python3
# requires system Python and the python3-apt package
from collections import OrderedDict # Starting with Python 3.7, we could just use vanilla dicts
import apt # ImportError? apt install python3-apt
def describe(pkg):
# Python 3.7 equivalent:
# return {"Name": pkg.name, "Installed": pkg.installed.version, "Candidate": pkg.candidate.version}
return OrderedDict((("Name", pkg.name), ("Current", pkg.installed.version), ("Target", pkg.candidate.version)))
def show_packages(scr, upgrades, auto):
def print(s="", *args):
scr.addstr(str(s) + "\n", *args)
desc = [describe(pkg) for pkg in upgrades]
widths = OrderedDict((x, len(x)) for x in desc[0]) # Start with header widths
for d in desc:
for col in d:
widths[col] = max(widths[col], len(d[col]))
fmt = "[ ] " + " ".join("%%-%ds" % col for col in widths.values())
print(fmt % tuple(widths), curses.A_BOLD)
print("--- " + " ".join("-" * col for col in widths.values()))
# TODO: Cope with more packages than lines on the screen (scroll? paginate?)
for d in desc:
print(fmt % tuple(d.values()))
print()
if auto: print("Plus %d auto-installed packages." % auto)
print("Select packages to upgrade, then Enter to apply.")
print("Press I for more info on a package [TODO]")
pkg = 0
install = [False] * len(upgrades)
while True:
scr.move(pkg + 2, 1)
key = scr.getkey()
if key == "Q" or key == "q": return []
if key == "\n": break
if key == "KEY_UP": pkg = (pkg - 1) % len(upgrades)
if key == "KEY_DOWN": pkg = (pkg + 1) % len(upgrades)
if key == " ":
install[pkg] = not install[pkg]
scr.addstr(pkg + 2, 1, "X" if install[pkg] else " ")
# scr.addstr(len(upgrades) + 7, 0, repr(key))
return [pkg for pkg, keep in zip(upgrades, install) if keep]
def main():
cache = apt.Cache()
cache.open()
upgrades = []
auto = 0
for pkg in cache:
if not pkg.is_installed: continue # This is checking upgrades only
if pkg.candidate == pkg.installed: continue # Already up-to-date
if pkg.is_auto_installed:
# Ignore (but summarize) autoinstalled packages
auto += 1
continue
upgrades.append(pkg)
if not upgrades:
print("Everything up-to-date.")
return
global curses; import curses
upgrades = curses.wrapper(show_packages, upgrades, auto)
if not upgrades: return
for pkg in upgrades:
pkg.mark_upgrade()
cache.commit()
if __name__ == "__main__":
main()
| mit | Python |
1e7f9d13d0e118d0d75225f69628680c8af3a8ae | fix name | ponty/pyscreenshot,ponty/pyscreenshot,ponty/pyscreenshot | tests/test_pyside.py | tests/test_pyside.py | from ref import backend_ref
from size import backend_size
def test_size_pyside():
backend_size('pyside')
def test_ref_pyside():
backend_ref('pyside')
| from ref import backend_ref
from size import backend_size
def test_size_pyqt():
backend_size('pyside')
def test_ref_pyqt():
backend_ref('pyside')
| bsd-2-clause | Python |
7e24a1be37871d58d5d39fcb6736716f99821dda | Improve help text | harrischristiansen/generals-bot | base/client/constants.py | base/client/constants.py | '''
@ Harris Christiansen (Harris@HarrisChristiansen.com)
Generals.io Automated Client - https://github.com/harrischristiansen/generals-bot
Constants: Constants used throughout the code
'''
SHOULD_DIRTY_MAP_ON_MOVE = True
ENDPOINT_BOT = "ws://botws.generals.io/socket.io/?EIO=3&transport=websocket"
ENDPOINT_PUBLIC = "ws://ws.generals.io/socket.io/?EIO=3&transport=websocket"
BOT_KEY = "O13f0dijsf"
REPLAY_URLS = {
'na': "http://generals.io/replays/",
'eu': "http://eu.generals.io/replays/",
'bot': "http://bot.generals.io/replays/",
}
START_KEYWORDS = ["start", "go", "force", "play", "ready", "rdy"]
HELLO_KEYWORDS = ["hi", "hello", "hey", "sup", "myssix"]
HELP_KEYWORDS = ["help", "config", "change"]
GENERALS_MAPS = [
"KILL A KING",
"Plots",
"Speed",
"Experiment G",
"WIN or LOSE",
"The Inquisitor",
"Kingdom of Branches",
"Hidden 1",
]
DIRECTIONS = [(1, 0), (-1, 0), (0, 1), (0, -1)]
TILE_EMPTY = -1
TILE_MOUNTAIN = -2
TILE_FOG = -3
TILE_OBSTACLE = -4
# Opponent Type Definitions
OPP_EMPTY = 0
OPP_ARMY = 1
OPP_CITY = 2
OPP_GENERAL = 3
MAX_NUM_TEAMS = 8
PRE_HELP_TEXT = [
"| Hi, I am Myssix - a generals.io bot",
"| ======= Available Commands =======",
"| start: send force start",
"| speed 4: set game play speed [1, 2, 3, 4]",
"| map [top, hot]: assign a random map (from the top or hot list)",
"| map Map Name: assign map by name",
"| team 1: join a team [1 - 8]",
"| normal: set map to classic (no map)",
"| swamp 0.5: set swamp value for classic map",
"| Code available at: git.io/myssix",
]
GAME_HELP_TEXT = [
"| ======= Available Commands =======",
"| team: request not to be attacked",
"| unteam: cancel team",
"| Code available at: git.io/myssix",
]
HELLO_TEXT = [
" Hi, I am Myssix - a generals.io bot",
" Say 'go' to start, or 'help' for a list of additional commands",
" Code available at: git.io/myssix",
] | '''
@ Harris Christiansen (Harris@HarrisChristiansen.com)
Generals.io Automated Client - https://github.com/harrischristiansen/generals-bot
Constants: Constants used throughout the code
'''
SHOULD_DIRTY_MAP_ON_MOVE = True
ENDPOINT_BOT = "ws://botws.generals.io/socket.io/?EIO=3&transport=websocket"
ENDPOINT_PUBLIC = "ws://ws.generals.io/socket.io/?EIO=3&transport=websocket"
BOT_KEY = "O13f0dijsf"
REPLAY_URLS = {
'na': "http://generals.io/replays/",
'eu': "http://eu.generals.io/replays/",
'bot': "http://bot.generals.io/replays/",
}
START_KEYWORDS = ["start", "go", "force", "play", "ready", "rdy"]
HELLO_KEYWORDS = ["hi", "hello", "hey", "sup", "myssix"]
HELP_KEYWORDS = ["help", "config", "change"]
GENERALS_MAPS = [
"KILL A KING",
"Plots",
"Speed",
"Experiment G",
"WIN or LOSE",
"The Inquisitor",
"Kingdom of Branches",
"Hidden 1",
]
DIRECTIONS = [(1, 0), (-1, 0), (0, 1), (0, -1)]
TILE_EMPTY = -1
TILE_MOUNTAIN = -2
TILE_FOG = -3
TILE_OBSTACLE = -4
# Opponent Type Definitions
OPP_EMPTY = 0
OPP_ARMY = 1
OPP_CITY = 2
OPP_GENERAL = 3
MAX_NUM_TEAMS = 8
PRE_HELP_TEXT = [
"| Hi, I am Myssix - a generals.io bot",
"| ======= Available Commands =======",
"| start: send force start",
"| speed 4: set game play speed [1, 2, 3, 4]",
"| map [top, hot]: assign a random map (from the top or hot list)",
"| map Map Name: assign map by name",
"| team 1: join a team [1 - 8]",
"| normal: set map to classic (no map)",
"| swamp 0.5: set swamp value for classic map",
"| Code available at: git.io/myssix",
]
GAME_HELP_TEXT = [
"| ======= Available Commands =======",
"| team: request not to be attacked",
"| unteam: cancel team",
"| Code available at: git.io/myssix",
]
HELLO_TEXT = [
"| Hi, I am Myssix - a generals.io bot",
"| Say help for a list of available commands",
"| Code available at: git.io/myssix",
] | mit | Python |
1f9fa8db0811a8853e072492b14fef4c107411b7 | Fix server sys.path to find the pyqode package | zwadar/pyqode.core,pyQode/pyqode.core,pyQode/pyqode.core | test/server.py | test/server.py | """
Server used for tests
"""
import sys
import os
# ensure sys knows about pyqode.core
sys.path.insert(0, os.path.abspath('..'))
from pyqode.core import code_completion
from pyqode.core import server
from pyqode.core import workers
if __name__ == '__main__':
workers.CodeCompletion.providers.append(
code_completion.DocumentWordsProvider())
server.run()
| """
Server used for tests
"""
from pyqode.core import code_completion
from pyqode.core import server
from pyqode.core import workers
if __name__ == '__main__':
workers.CodeCompletion.providers.append(
code_completion.DocumentWordsProvider())
server.run()
| mit | Python |
0f72371fae4f614932adc45a3461c1bcff5f569a | Test for File.write_line() | BakeCode/performance-testing,BakeCode/performance-testing | tests/test_result.py | tests/test_result.py | import unittest
from performance_testing.result import Result, File
import os
import shutil
class ResultTestCase(unittest.TestCase):
def setUp(self):
self.current_directory = os.path.dirname(os.path.abspath(__file__))
self.result_directory = os.path.join(self.current_directory, 'assets/test_result')
self.test_file_name = 'foo_bar'
self.test_file_path = os.path.join(self.result_directory, self.test_file_name)
def clear_result_dir(self):
if os.path.exists(self.result_directory):
shutil.rmtree(self.result_directory)
def test_result_init(self):
self.clear_result_dir()
self.assertFalse(os.path.exists(self.result_directory))
result = Result(directory=self.result_directory)
self.assertTrue(result.file.path)
def test_file_init(self):
self.clear_result_dir()
self.assertFalse(os.path.exists(self.result_directory))
file = File(directory=self.result_directory, name=self.test_file_name)
self.assertTrue(os.path.exists(self.result_directory))
self.assertTrue(os.path.exists(self.test_file_path))
def test_write_file(self):
self.clear_result_dir()
file = File(directory=self.result_directory, name=self.test_file_name)
stream = open(self.test_file_path, 'r')
self.assertEqual(stream.read(), '')
stream.close()
text = 'askld asjdjidjj saidj98e12ud0- asid902ur890a'
file.write_line(text)
stream = open(self.test_file_path, 'r')
self.assertEqual(stream.read(), text + '\n')
stream.close()
def tearDown(self):
self.clear_result_dir()
| import unittest
from performance_testing.result import Result, File
import os
import shutil
class ResultTestCase(unittest.TestCase):
def setUp(self):
self.current_directory = os.path.dirname(os.path.abspath(__file__))
self.result_directory = os.path.join(self.current_directory, 'assets/test_result')
def clear_result_dir(self):
if os.path.exists(self.result_directory):
shutil.rmtree(self.result_directory)
def test_result_init(self):
self.clear_result_dir()
self.assertFalse(os.path.exists(self.result_directory))
result = Result(directory=self.result_directory)
self.assertTrue(result.file.path)
def test_file_init(self):
self.clear_result_dir()
file_name = 'foo_bar'
self.assertFalse(os.path.exists(self.result_directory))
file = File(directory=self.result_directory, name=file_name)
self.assertTrue(os.path.exists(self.result_directory))
self.assertTrue(os.path.exists(os.path.join(self.result_directory, file_name)))
def tear_down(self):
self.clear_result_dir()
| mit | Python |
ee06edd57dfef223b80d7b01feff4da723660de0 | Disable temporary tests on dates | skirsdeda/djangocms-blog,nephila/djangocms-blog,nephila/djangocms-blog,skirsdeda/djangocms-blog,nephila/djangocms-blog,skirsdeda/djangocms-blog | tests/test_search.py | tests/test_search.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from haystack.constants import DEFAULT_ALIAS
from haystack.query import SearchQuerySet
from djangocms_blog.models import Post
from .base import BaseTest
class BlogIndexingTests(BaseTest):
def setUp(self):
self.get_pages()
def test_blog_post_is_indexed_using_prepare(self):
"""This tests the indexing path way used by update_index mgmt command"""
post = self._get_post(self._post_data[0]['en'])
post = self._get_post(self._post_data[0]['it'], post, 'it')
index = self.get_post_index()
index.index_queryset(DEFAULT_ALIAS) # initialises index._backend_alias
indexed = index.prepare(post)
self.assertEqual(post.get_title(), indexed['title'])
self.assertEqual(post.get_description(), indexed['description'])
self.assertEqual('First post First post first line This is the description category 1', indexed['text'])
self.assertEqual(post.get_absolute_url(), indexed['url'])
#self.assertEqual(post.date_published.strftime("%Y-%m-%d %H:%M:%S"), indexed['pub_date'])
def test_blog_post_is_indexed_using_update_object(self):
"""This tests the indexing path way used by the RealTimeSignalProcessor"""
post = self._get_post(self._post_data[0]['en'])
post = self._get_post(self._post_data[0]['it'], post, 'it')
index = self.get_post_index()
index.update_object(post, using=DEFAULT_ALIAS)
indexed = index.prepared_data
self.assertEqual(post.get_title(), indexed['title'])
self.assertEqual(post.get_description(), indexed['description'])
self.assertEqual('First post First post first line This is the description category 1', indexed['text'])
self.assertEqual(post.get_absolute_url(), indexed['url'])
#self.assertEqual(post.date_published.strftime("%Y-%m-%d %H:%M:%S"), indexed['pub_date'])
def test_searchqueryset(self):
posts = self.get_posts()
all_results = SearchQuerySet().models(Post)
self.assertEqual(len(posts), len(all_results))
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from haystack.constants import DEFAULT_ALIAS
from haystack.query import SearchQuerySet
from djangocms_blog.models import Post
from .base import BaseTest
class BlogIndexingTests(BaseTest):
def setUp(self):
self.get_pages()
def test_blog_post_is_indexed_using_prepare(self):
"""This tests the indexing path way used by update_index mgmt command"""
post = self._get_post(self._post_data[0]['en'])
post = self._get_post(self._post_data[0]['it'], post, 'it')
index = self.get_post_index()
index.index_queryset(DEFAULT_ALIAS) # initialises index._backend_alias
indexed = index.prepare(post)
self.assertEqual(post.get_title(), indexed['title'])
self.assertEqual(post.get_description(), indexed['description'])
self.assertEqual('First post First post first line This is the description category 1', indexed['text'])
self.assertEqual(post.get_absolute_url(), indexed['url'])
self.assertEqual(post.date_published.strftime("%Y-%m-%d %H:%M:%S"), indexed['pub_date'])
def test_blog_post_is_indexed_using_update_object(self):
"""This tests the indexing path way used by the RealTimeSignalProcessor"""
post = self._get_post(self._post_data[0]['en'])
post = self._get_post(self._post_data[0]['it'], post, 'it')
index = self.get_post_index()
index.update_object(post, using=DEFAULT_ALIAS)
indexed = index.prepared_data
self.assertEqual(post.get_title(), indexed['title'])
self.assertEqual(post.get_description(), indexed['description'])
self.assertEqual('First post First post first line This is the description category 1', indexed['text'])
self.assertEqual(post.get_absolute_url(), indexed['url'])
self.assertEqual(post.date_published.strftime("%Y-%m-%d %H:%M:%S"), indexed['pub_date'])
def test_searchqueryset(self):
posts = self.get_posts()
all_results = SearchQuerySet().models(Post)
self.assertEqual(len(posts), len(all_results))
| bsd-3-clause | Python |
bcecf5626768c8399e9293fe12a25511eeb6c52d | Bump version to 3.1-beta | godotengine/godot,honix/godot,pkowal1982/godot,Paulloz/godot,josempans/godot,Valentactive/godot,ZuBsPaCe/godot,MarianoGnu/godot,firefly2442/godot,firefly2442/godot,Faless/godot,okamstudio/godot,DmitriySalnikov/godot,godotengine/godot,groud/godot,ZuBsPaCe/godot,josempans/godot,guilhermefelipecgs/godot,ex/godot,okamstudio/godot,Paulloz/godot,sanikoyes/godot,josempans/godot,Valentactive/godot,Shockblast/godot,akien-mga/godot,Zylann/godot,Valentactive/godot,Zylann/godot,ex/godot,Shockblast/godot,Valentactive/godot,okamstudio/godot,vkbsb/godot,ZuBsPaCe/godot,vkbsb/godot,guilhermefelipecgs/godot,Valentactive/godot,Shockblast/godot,okamstudio/godot,Shockblast/godot,Shockblast/godot,guilhermefelipecgs/godot,Paulloz/godot,vnen/godot,sanikoyes/godot,vnen/godot,honix/godot,DmitriySalnikov/godot,Faless/godot,pkowal1982/godot,vkbsb/godot,godotengine/godot,BastiaanOlij/godot,ZuBsPaCe/godot,firefly2442/godot,pkowal1982/godot,akien-mga/godot,okamstudio/godot,josempans/godot,MarianoGnu/godot,vkbsb/godot,DmitriySalnikov/godot,vnen/godot,vkbsb/godot,vnen/godot,Zylann/godot,vkbsb/godot,akien-mga/godot,vnen/godot,vnen/godot,groud/godot,godotengine/godot,godotengine/godot,vkbsb/godot,MarianoGnu/godot,Valentactive/godot,godotengine/godot,godotengine/godot,honix/godot,BastiaanOlij/godot,MarianoGnu/godot,DmitriySalnikov/godot,Zylann/godot,BastiaanOlij/godot,Faless/godot,guilhermefelipecgs/godot,pkowal1982/godot,BastiaanOlij/godot,vnen/godot,Zylann/godot,ex/godot,ZuBsPaCe/godot,okamstudio/godot,okamstudio/godot,ZuBsPaCe/godot,honix/godot,vkbsb/godot,ZuBsPaCe/godot,DmitriySalnikov/godot,godotengine/godot,pkowal1982/godot,Faless/godot,ex/godot,honix/godot,akien-mga/godot,sanikoyes/godot,okamstudio/godot,DmitriySalnikov/godot,ZuBsPaCe/godot,Zylann/godot,ex/godot,guilhermefelipecgs/godot,honix/godot,DmitriySalnikov/godot,groud/godot,MarianoGnu/godot,Paulloz/godot,MarianoGnu/godot,Paulloz/godot,Faless/godot,Paulloz/godot,josemp
ans/godot,guilhermefelipecgs/godot,Zylann/godot,Shockblast/godot,BastiaanOlij/godot,Valentactive/godot,Shockblast/godot,BastiaanOlij/godot,vnen/godot,Faless/godot,MarianoGnu/godot,groud/godot,firefly2442/godot,BastiaanOlij/godot,Shockblast/godot,sanikoyes/godot,Zylann/godot,pkowal1982/godot,firefly2442/godot,groud/godot,sanikoyes/godot,josempans/godot,akien-mga/godot,BastiaanOlij/godot,Paulloz/godot,akien-mga/godot,sanikoyes/godot,guilhermefelipecgs/godot,firefly2442/godot,josempans/godot,ex/godot,guilhermefelipecgs/godot,akien-mga/godot,groud/godot,sanikoyes/godot,pkowal1982/godot,Valentactive/godot,pkowal1982/godot,Faless/godot,firefly2442/godot,akien-mga/godot,MarianoGnu/godot,ex/godot,firefly2442/godot,josempans/godot,okamstudio/godot,Faless/godot,ex/godot,okamstudio/godot,sanikoyes/godot | version.py | version.py | short_name = "godot"
name = "Godot Engine"
major = 3
minor = 1
status = "beta"
module_config = ""
| short_name = "godot"
name = "Godot Engine"
major = 3
minor = 1
status = "alpha"
module_config = ""
| mit | Python |
ca9f91aaf5fd15ab58875363858fd5f73305a410 | comment complex_test_header and pointcloud | eEcoLiDAR/eEcoLiDAR | laserchicken/test_utils.py | laserchicken/test_utils.py | from laserchicken.keys import *
import numpy as np
import datetime as dt
def generate_simple_test_point_cloud():
# This simple_test_point cloud and the simple_test_header should be in sync. Some tests depend on it.
pc = {point: {'x': {'type': 'float', 'data': np.array([1, 2, 3])},
'y': {'type': 'float', 'data': np.array([20, 30, 40])},
'z': {'type': 'float', 'data': np.array([300, 400, 500])}}}
return pc
def generate_complex_test_point_cloud():
# This complex_test_point cloud and the complex_test_header should be in sync. Some tests depend on it.
dto= dt.datetime(2018,1,18,16,1,0)
pc = {point: {'x': {'type': 'float', 'data': np.array([1, 2, 3, 4, 5])},
'y': {'type': 'float', 'data': np.array([2, 3, 4, 5, 6])},
'z': {'type': 'float', 'data': np.array([3, 4, 5, 6, 7])},
'return': {'type': 'int', 'data': np.array([1, 1, 2, 2, 1])}
},
point_cloud: {'offset': {'type': 'double', 'data': 12.1}},
provenance: [{'time' : dto , 'module' : 'filter'}]
}
return pc
def generate_simple_test_header():
# This simple_test_header cloud and the simple_test_point should be in sync. Some tests depend on it.
header = """ply
format ascii 1.0
element vertex 3
property float x
property float y
property float z
"""
return header
def generate_complex_test_header():
# This complex_test_header cloud and the complex_test_point should be in sync. Some tests depend on it.
comment = {"time" : dt.datetime(2018,1,18,16,1,0),"module" : "filter"}
header = """ply
format ascii 1.0
comment [
comment %s
comment ]
element vertex 5
property float x
property float y
property float z
property int return
element pointcloud 1
property double offset
""" % str(comment)
return header
def generate_simple_test_data():
data = """1 20 300
2 30 400
3 40 500
"""
return data
def generate_complex_test_data():
data = """1 2 3 1
2 3 4 1
3 4 5 2
4 5 6 2
5 6 7 1
12.1
"""
return data
| from laserchicken.keys import *
import numpy as np
import datetime as dt
def generate_simple_test_point_cloud():
# This simple_test_point cloud and the simple_test_header should be in sync. Some tests depend on it.
pc = {point: {'x': {'type': 'float', 'data': np.array([1, 2, 3])},
'y': {'type': 'float', 'data': np.array([20, 30, 40])},
'z': {'type': 'float', 'data': np.array([300, 400, 500])}}}
return pc
def generate_complex_test_point_cloud():
dto= dt.datetime(2018,1,18,16,1,0)
pc = {point: {'x': {'type': 'float', 'data': np.array([1, 2, 3, 4, 5])},
'y': {'type': 'float', 'data': np.array([2, 3, 4, 5, 6])},
'z': {'type': 'float', 'data': np.array([3, 4, 5, 6, 7])},
'return': {'type': 'int', 'data': np.array([1, 1, 2, 2, 1])}
},
point_cloud: {'offset': {'type': 'double', 'data': 12.1}},
provenance: [{'time' : dto , 'module' : 'filter'}]
}
return pc
def generate_simple_test_header():
# This simple_test_header cloud and the simple_test_point should be in sync. Some tests depend on it.
header = """ply
format ascii 1.0
element vertex 3
property float x
property float y
property float z
"""
return header
def generate_complex_test_header():
comment = {"time" : dt.datetime(2018,1,18,16,1,0),"module" : "filter"}
header = """ply
format ascii 1.0
comment [
comment %s
comment ]
element vertex 5
property float x
property float y
property float z
property int return
element pointcloud 1
property double offset
""" % str(comment)
return header
def generate_simple_test_data():
data = """1 20 300
2 30 400
3 40 500
"""
return data
def generate_complex_test_data():
data = """1 2 3 1
2 3 4 1
3 4 5 2
4 5 6 2
5 6 7 1
12.1
"""
return data
| apache-2.0 | Python |
1b4fc9471297dbc704cf0efaac54fce5891014d7 | make the proxy command (relay/server) required | lincheney/ssh-forward-proxy,lincheney/ssh-forward-proxy | bin/ssh-forward-proxy.py | bin/ssh-forward-proxy.py | import logging
import argparse
logging.basicConfig(level=logging.INFO)
import ssh_forward_proxy as ssh
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Forward all SSH requests to remote but authenticating as the proxy')
parser.add_argument('-i', dest='identity_file', help='Path to identity file (same as ssh -i)')
subparsers = parser.add_subparsers(dest='command')
subparsers.required = True
sub = subparsers.add_parser('relay', help='Proxy SSH traffic on STDIN to the remote')
sub.add_argument('port', type=int, help='Remote port')
sub.add_argument('host', help='Remote host')
sub.add_argument('user', help='Username')
sub = subparsers.add_parser('server', help='Run a standalone SSH server that forwards traffic to the remote')
sub.add_argument('port', nargs='?', default=ssh.SSH_PORT, type=int, help='Port to run server on (default: {})'.format(ssh.SSH_PORT))
sub.add_argument('host', nargs='?', default='', help='Host to bind server to')
args = parser.parse_args()
kwargs = dict(
key_filename=args.identity_file,
)
if args.command == 'relay':
# no logging in relay since stderr is piped to SSH client
logging.disable(level=logging.CRITICAL)
ssh.Proxy(username=args.user, host=args.host, port=args.port, **kwargs)
elif args.command == 'server':
ssh.run_server(args.host, args.port, worker=ssh.ProxyServer, **kwargs)
| import logging
import argparse
logging.basicConfig(level=logging.INFO)
import ssh_forward_proxy as ssh
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Forward all SSH requests to remote but authenticating as the proxy')
parser.add_argument('-i', dest='identity_file', help='Path to identity file (same as ssh -i)')
subparsers = parser.add_subparsers(dest='command')
sub = subparsers.add_parser('relay', help='Proxy SSH traffic on STDIN to the remote')
sub.add_argument('port', type=int, help='Remote port')
sub.add_argument('host', help='Remote host')
sub.add_argument('user', help='Username')
sub = subparsers.add_parser('server', help='Run a standalone SSH server that forwards traffic to the remote')
sub.add_argument('port', nargs='?', default=ssh.SSH_PORT, type=int, help='Port to run server on (default: {})'.format(ssh.SSH_PORT))
sub.add_argument('host', nargs='?', default='', help='Host to bind server to')
args = parser.parse_args()
kwargs = dict(
key_filename=args.identity_file,
)
if args.command == 'relay':
# no logging in relay since stderr is piped to SSH client
logging.disable(level=logging.CRITICAL)
ssh.Proxy(username=args.user, host=args.host, port=args.port, **kwargs)
elif args.command == 'server':
ssh.run_server(args.host, args.port, worker=ssh.ProxyServer, **kwargs)
| mit | Python |
c354f387023a6d7b7aebbd2b61fb7336fe72f346 | Bump to next v.0.13.0a0 (#217) | barrachri/aiodocker,gaopeiliang/aiodocker,barrachri/aiodocker,gaopeiliang/aiodocker,paultag/aiodocker,barrachri/aiodocker,gaopeiliang/aiodocker | aiodocker/__init__.py | aiodocker/__init__.py | from .docker import Docker
__version__ = '0.13.0a0'
__all__ = ("Docker", )
| from .docker import Docker
__version__ = '0.12.0'
__all__ = ("Docker", )
| mit | Python |
9d1d8729077a619d724812c6919d6126d5eedcd2 | Speed up test | Parsl/parsl,swift-lang/swift-e-lab,Parsl/parsl,Parsl/parsl,swift-lang/swift-e-lab,Parsl/parsl | parsl/tests/test_ipp/test_python_worker_fail.py | parsl/tests/test_ipp/test_python_worker_fail.py | """Testing bash apps
"""
import parsl
from parsl import *
import time
import argparse
# parsl.set_stream_logger()
workers = IPyParallelExecutor()
dfk = DataFlowKernel(executors=[workers])
@App('python', dfk)
def import_echo(x, string, sleep=0, stdout=None):
import time
time.sleep(sleep)
print(string)
return x * 5
def test_parallel_for(n=10):
d = {}
start = time.time()
for i in range(0, n):
d[i] = import_echo(2, "hello", sleep=2)
# time.sleep(0.01)
assert len(
d.keys()) == n, "Only {0}/{1} keys in dict".format(len(d.keys()), n)
[d[i].result() for i in d]
print("Duration : {0}s".format(time.time() - start))
print("[TEST STATUS] test_parallel_for [SUCCESS]")
return d
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default="10",
help="Count of apps to launch")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
if args.debug:
parsl.set_stream_logger()
x = test_parallel_for()
# x = test_parallel_for(int(args.count))
# x = test_stdout()
# raise_error(0)
| """Testing bash apps
"""
import parsl
from parsl import *
import time
import argparse
# parsl.set_stream_logger()
workers = IPyParallelExecutor()
dfk = DataFlowKernel(executors=[workers])
@App('python', dfk)
def import_echo(x, string, sleep=0, stdout=None):
import time
time.sleep(sleep)
print(string)
return x * 5
def test_parallel_for(n=10):
d = {}
start = time.time()
for i in range(0, n):
d[i] = import_echo(2, "hello", sleep=20)
# time.sleep(0.01)
assert len(
d.keys()) == n, "Only {0}/{1} keys in dict".format(len(d.keys()), n)
[d[i].result() for i in d]
print("Duration : {0}s".format(time.time() - start))
print("[TEST STATUS] test_parallel_for [SUCCESS]")
return d
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default="10",
help="Count of apps to launch")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
if args.debug:
parsl.set_stream_logger()
x = test_parallel_for()
# x = test_parallel_for(int(args.count))
# x = test_stdout()
# raise_error(0)
| apache-2.0 | Python |
b0bde22e3ff0d2df2773f41aeaf8eb0ba6d0fa3f | Allow a default value to be specified when fetching a field value | jskeet/gcloud-dotnet,jskeet/google-cloud-dotnet,googleapis/google-cloud-dotnet,googleapis/google-cloud-dotnet,jskeet/google-cloud-dotnet,googleapis/google-cloud-dotnet,jskeet/google-cloud-dotnet,jskeet/google-cloud-dotnet,jskeet/google-cloud-dotnet | tools/getapifield.py | tools/getapifield.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
import argparse
parser = argparse.ArgumentParser(description='Fetches a field from a single API in the catalog')
parser.add_argument('file', help='File to load')
parser.add_argument('id', help='ID of API to fetch')
parser.add_argument('field', help='Field to find and output')
parser.add_argument('--default', help='Default value to output if field is not present')
args = parser.parse_args()
filename = sys.argv[1]
file = open(filename, "r")
catalog = json.load(file)
query = [api.get(args.field) for api in catalog["apis"] if api["id"] == args.id]
if len(query) != 1:
raise Exception(f"API {args.id} not found (or has duplicate definitions)")
elif not query[0] and args.default:
print(args.default)
elif not query[0]:
raise Exception(f"API {args.id} has no field {args.field}")
else:
print(query[0])
| # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
import argparse
parser = argparse.ArgumentParser(description='Fetches a field from a single API in the catalog')
parser.add_argument('file', help='File to load')
parser.add_argument('id', help='ID of API to fetch')
parser.add_argument('field', help='Field to find and output')
args = parser.parse_args()
filename = sys.argv[1]
file = open(filename, "r")
catalog = json.load(file)
query = [api.get(args.field) for api in catalog["apis"] if api["id"] == args.id]
if len(query) != 1:
raise Exception(f"API {args.id} not found (or has duplicate definitions)")
elif not query[0]:
raise Exception(f"API {args.id} has no field {args.field}")
else:
print(query[0])
| apache-2.0 | Python |
6d7f7ebb913bf99d36c551ef3105c98c7150c68b | Update version_info.py - 20 | postpdm/ich_bau,postpdm/ich_bau | ich_bau/templatetags/version_info.py | ich_bau/templatetags/version_info.py | from django import template
register = template.Library()
@register.simple_tag(name='site_version_info')
def site_version_info():
return 'v0.0020 at 20.03.2020'
| from django import template
register = template.Library()
@register.simple_tag(name='site_version_info')
def site_version_info():
return 'v0.0019 at 05.02.2020'
| apache-2.0 | Python |
f51915a6c373de39785d8273b2a9f6e11ff67b9e | Test for no pairs from one particle | benwaugh/dimuon | test_dimuon.py | test_dimuon.py | from dimuon import find_pairs
def test_find_pairs():
particles = None
pairs = find_pairs(particles)
def test_no_particles():
particles = []
pairs = find_pairs(particles)
assert len(pairs) == 0
def test_one_particle():
particles = [None]
pairs = find_pairs(particles)
assert len(pairs) == 0
| from dimuon import find_pairs
def test_find_pairs():
particles = None
pairs = find_pairs(particles)
def test_no_particles():
particles = []
pairs = find_pairs(particles)
assert len(pairs) == 0
| mit | Python |
a84e6e2a31408bcefbb6ecb5ba354e70bb870f56 | use slightly smaller textarea | adieu/allbuttonspressed,adieu/allbuttonspressed | minicms/admin.py | minicms/admin.py | from .models import Block, Page
from django.contrib import admin
from django.contrib.admin.widgets import AdminTextareaWidget
from django.db import models
# The default TextField doesn't have enough rows
class UsableTextarea(AdminTextareaWidget):
def __init__(self, attrs=None):
default_attrs = {'rows': '32'}
if attrs:
default_attrs.update(attrs)
super(UsableTextarea, self).__init__(default_attrs)
class BaseAdmin(admin.ModelAdmin):
formfield_overrides = {
models.TextField: {'widget': UsableTextarea},
}
class PageAdmin(BaseAdmin):
fields = ('url', 'title', 'content')
list_display = ('url', 'title')
search_fields = ('url',)
ordering = ('url',)
class BlockAdmin(BaseAdmin):
list_display = ('name',)
search_fields = ('name',)
ordering = ('name',)
admin.site.register(Page, PageAdmin)
admin.site.register(Block, BlockAdmin)
| from .models import Block, Page
from django.contrib import admin
from django.contrib.admin.widgets import AdminTextareaWidget
from django.db import models
# The default TextField doesn't have enough rows
class UsableTextarea(AdminTextareaWidget):
def __init__(self, attrs=None):
default_attrs = {'rows': '40'}
if attrs:
default_attrs.update(attrs)
super(UsableTextarea, self).__init__(default_attrs)
class BaseAdmin(admin.ModelAdmin):
formfield_overrides = {
models.TextField: {'widget': UsableTextarea},
}
class PageAdmin(BaseAdmin):
fields = ('url', 'title', 'content')
list_display = ('url', 'title')
search_fields = ('url',)
ordering = ('url',)
class BlockAdmin(BaseAdmin):
list_display = ('name',)
search_fields = ('name',)
ordering = ('name',)
admin.site.register(Page, PageAdmin)
admin.site.register(Block, BlockAdmin)
| bsd-3-clause | Python |
6385b19090df0f36cbd16c96ee6ab763441ae460 | add test for passing parameters | k-bx/mockstar,k-bx/mockstar | mockstar_test.py | mockstar_test.py | # -*- coding: utf-8 -*-
import unittest
from unittest import TestCase
from mock import Mock
from mock import patch
from mockstar import p
from mockstar import DotDict
def side_effect_one():
pass
def side_effect_two():
pass
def side_effect_three():
val = side_effect_four()
return val * 2
def side_effect_four():
return 4
def side_effect_five(n):
return n
class TestDotDict(TestCase):
def test_should_setattr_getattr(self):
d = DotDict()
d['foo'] = 'something'
# do
result = d.foo
self.assertEquals(result, 'something')
class TestPatch(TestCase):
@p(__name__ + '.side_effect_one')
@p(__name__ + '.side_effect_two')
def test_should_mock_to_kw(self, se):
self.assertIsInstance(se.side_effect_one, Mock)
self.assertIsInstance(se.side_effect_two, Mock)
@p(__name__ + '.side_effect_four')
def test_should_mock_inner_call(self, se):
se.side_effect_four.return_value = 1
# do
result = side_effect_three()
self.assertEquals(result, 2)
@p(__name__ + '.side_effect_five', autospec=True)
@patch('mockstar.patch')
def test_should_pass_mock_parameters(self, mockstar_patch_mock, se):
se.side_effect_five(10)
self.assertRaises(TypeError, lambda: se.side_effect_five())
if __name__ == '__main__':
unittest.main()
| # -*- coding: utf-8 -*-
import unittest
from unittest import TestCase
from mock import Mock
from mockstar import p
from mockstar import DotDict
def side_effect_one():
pass
def side_effect_two():
pass
def side_effect_three():
val = side_effect_four()
return val * 2
def side_effect_four():
return 4
class TestDotDict(TestCase):
def test_should_setattr_getattr(self):
d = DotDict()
d['foo'] = 'something'
# do
result = d.foo
self.assertEquals(result, 'something')
class TestPatch(TestCase):
@p(__name__ + '.side_effect_one')
@p(__name__ + '.side_effect_two')
def test_should_mock_to_kw(self, se):
self.assertIsInstance(se.side_effect_one, Mock)
self.assertIsInstance(se.side_effect_two, Mock)
@p(__name__ + '.side_effect_four')
def test_should_mock_inner_call(self, se):
se.side_effect_four.return_value = 1
# do
result = side_effect_three()
self.assertEquals(result, 2)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | Python |
e7544073a4496d4ba9514f85e8957c54d32cc201 | Fix Typo | iGene/igene_bot,aver803bath5/igene_bot | models/google.py | models/google.py | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import logging
import re
import requests
import urllib.parse
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def google(bot, update):
search = update.message.text
search = re.sub(r'^(?i)google ','',search)
logger.info("Google %s" %search)
headers = {'User-Agent': 'Mozilla/5.0'}
r = requests.get('https://www.google.com/search?q='+ search, headers)
soup = BeautifulSoup(r.text, "html.parser")
result = soup.find('h3', {'class': 'r'}).find('a').attrs['href']
result = urllib.parse.unquote(result)
if_http_start_regex = re.compile('^http')
if_http_start = if_http_start_regex.match(str(result))
if if_http_start == None:
remove_url_q_re = re.compile('^\/url\?q=')
remove_url_sa_re = re.compile('\&sa.+')
result = re.sub(remove_url_q_re, '', result)
result = re.sub(remove_url_sa_re, '', result)
update.message.reply_text(result)
else:
update.message.reply_text(result)
def images(bot, update):
search = update.message.text
search = re.sub(r'%(?i)image ','',search)
logger.info("Google image search %s" %search)
headers = {'User-Agent': 'Mozilla/5.0'}
r = requests.get('https://www.google.com/search?tbm=isch&q='+ search, headers)
soup = BeautifulSoup(r.text, "html.parser")
images = [a['src'] for a in soup.find_all("img", {"src": re.compile("gstatic.com")})]
update.message.reply_text(images[0])
def correct(bot, update):
search = update.message.text
user = update.message.from_user.username
logger.info("Auto correct")
headers = {'User-Agent': 'Mozilla/5.0'}
r = requests.get('https://www.google.com/search?q='+ search, headers)
soup = BeautifulSoup(r.text, "html.parser")
result = soup.find('a',{'class': 'spell'})
if not result is None:
update.message.reply_text(user+' 的意思也許是\n'+result.text)
| # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import logging
import re
import requests
import urllib.parse
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def google(bot, update):
search = update.message.text
search = re.sub(r'^(?i)google ','',search)
logger.info("Google %s" %search)
headers = {'User-Agent': 'Mozilla/5.0'}
r = requests.get('https://www.google.com/search?q='+ search, headers)
soup = BeautifulSoup(r.text, "html.parser")
result = soup.find('h3', {'class': 'r'}).find('a').attrs['href']
result = urllib.parse.unquote(result)
if_http_start_regex = re.compile('^http')
if_http_start = if_http_start_regex.match(str(result))
if if_http_start == None:
remove_url_q_re = re.compile('^\/url\?q=')
remove_url_sa_re = re.compile('\&sa.+')
result = re.sub(remove_url_q_re, '', result)
result = re.sub(remove_url_sa_re, '', result)
update.message.reply_text(result)
else:
update.message.reply_text(result)
def images(bot, update):
search = update.message.text
search = re.sub(r'%(?i)image ','',search)
logger.info("Google image search %s" %search)
headers = {'User-Agent': 'Mozilla/5.0'}
r = requests.get('https://www.google.com/search?tbm=isch&q='+ search, headers)
soup = BeautifulSoup(r.text, "html.parser")
images = [a['src'] for a in soup.find_all("img", {"src": re.compile("gstatic.com")})]
update.message.reply_text(images[0])
def correct(bot, update):
search = update.message.text
user = update.message.from_user.username
logger.info("Auto correct")
headers = {'User-Agent': 'Mozilla/5.0'}
r = requests.get('https://www.google.com/search?q='+ search, headers)
soup = BeautifulSoup(r.text, "html.parser")
result = soup.find('a',{'class': 'spell'})
if not result is None:
update.message.reply_text(user+' 的意思也許是\n'++result.text)
| mit | Python |
a800edd62b53b7546f98e56dcc9775933799befd | bump to 0.3.5. | tsuru/tsuru-circus | tsuru/__init__.py | tsuru/__init__.py | # Copyright 2013 tsuru-circus authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
__version__ = "0.3.5"
| # Copyright 2013 tsuru-circus authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
__version__ = "0.3.4"
| bsd-3-clause | Python |
25aba1a9cd0aa57b35c6bc9e5083a601585a214e | bump to 0.4.2. | tsuru/tsuru-circus | tsuru/__init__.py | tsuru/__init__.py | # Copyright 2013 tsuru-circus authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
__version__ = "0.4.2"
| # Copyright 2013 tsuru-circus authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
__version__ = "0.4.1"
| bsd-3-clause | Python |
240ebf998ebd633d6c5d8f90f46892071522759e | Fix #1. Supporting python3 | yychen/twd97 | twd97/__init__.py | twd97/__init__.py | from twd97.converter import fromwgs84, towgs84
| from converter import fromwgs84, towgs84
| mit | Python |
82bd501f89d3a228c3de9a2f355266b374c35a54 | Add current project path to the first position of sys.modules | bufferx/twork,bufferx/twork | twork/assembly.py | twork/assembly.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Zhang ZY<http://idupx.blogspot.com/>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
''' set classpath
'''
import os
import sys
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
PROJECT_PATH = os.path.realpath(os.path.join(CURRENT_PATH, '..'))
if PROJECT_PATH not in sys.path:
sys.path.insert(0, PROJECT_PATH)
def main():
print 'CURRENT_PATH:', CURRENT_PATH
print 'PROJECT_PATH:', PROJECT_PATH
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Zhang ZY<http://idupx.blogspot.com/>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
''' set classpath
'''
import os
import sys
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
PROJECT_PATH = os.path.realpath(os.path.join(CURRENT_PATH, '..'))
if PROJECT_PATH not in sys.path:
sys.path.append(PROJECT_PATH)
def main():
print 'CURRENT_PATH:', CURRENT_PATH
print 'PROJECT_PATH:', PROJECT_PATH
if __name__ == '__main__':
main()
| apache-2.0 | Python |
1b05a5359781bf972243697dcf7187de3eb5503c | Fix test cases | shichao-an/twitter-photos | twphotos/tests.py | twphotos/tests.py | import os
os.environ['TWPHOTOS_TEST_CONFIG'] = '0'
import shutil
from unittest import TestCase
from .settings import PROJECT_PATH
from .photos import TwitterPhotos
TEST_OUTPUT = 'test-output'
TEST_USER = 'WIRED'
class TestPhotos(TestCase):
"""Test TwitterPhotos class programmatically"""
def setUp(self):
d = os.path.join(PROJECT_PATH, TEST_OUTPUT)
if not os.path.exists(d):
os.makedirs(d)
self.directory = d
def test_credentials(self):
pass
def test_get(self):
twphotos = TwitterPhotos(TEST_USER)
p = twphotos.get(count=20)
def test_download(self):
twphotos = TwitterPhotos(user=TEST_USER, outdir=self.directory)
p = twphotos.get(count=20)
twphotos.download()
self.assertEqual(len(p), len(os.listdir(self.directory)))
def tearDown(self):
shutil.rmtree(self.directory)
| import os
import shutil
from unittest import TestCase
from .settings import PROJECT_PATH
from .photos import TwitterPhotos
TEST_OUTPUT = 'test-output'
TEST_USER = 'WIRED'
class TestPhotos(TestCase):
"""Test TwitterPhotos class programmatically"""
def setUp(self):
d = os.path.join(PROJECT_PATH, TEST_OUTPUT)
if not os.path.exists(d):
os.makedirs(d)
self.directory = d
def test_credentials(self):
pass
def test_get(self):
twphotos = TwitterPhotos(TEST_USER)
p = twphotos.get(count=20)
def test_download(self):
twphotos = TwitterPhotos(user=TEST_USER, outdir=self.directory)
p = twphotos.get(count=20)
p = twphotos.download()
self.assertEqual(len(p), len(os.listdir(self.directory)))
def tearDown(self):
shutil.rmtree(self.directory)
| bsd-2-clause | Python |
182cad51f830e21320f2442804b674cd32306ba8 | Sort navigation links by position and alphabetical order, closes: #1797. | SmartJog/webengine,SmartJog/webengine | utils/__init__.py | utils/__init__.py | import os
def get_valid_plugins():
"""
Returns a list of valid webengine plugins.
Returns: [('name', <module 'webengine.name'>), ...]
"""
webengine = __import__('webengine')
def __isplugin(mod_name):
""" Nested method of get_valid_plugins, tries to import webengine.<mod_name>.urls. """
mod = None
try:
__import__('webengine.' + mod_name + '.urls', {}, {}, [''])
mod = getattr(webengine, mod_name)
except ImportError:
return None
return mod_name, mod
wdir = webengine.__path__[0]
# Map os.listdir(wdir) to isplugin, and then "filter" elements that are None
return [dir for dir in map(__isplugin, [d for d in os.listdir(wdir) if os.path.isdir(os.path.join(wdir, d))]) if dir]
def webengine_template_processor(request):
"""
This method is called by the RequestContext() object.
It adds to the template variables the profile, etc..
Each key in the returned dict will be available as is
when processing the template.
Add everything you need in every template.
"""
from django.conf import settings
modules = get_valid_plugins()
menus = []
for mod in modules:
try:
m = mod[1].urls.menus
menus.append(m)
except AttributeError:
continue
#Sort menus by position and alphabetical order
def cmp_menu(x,y):
if x['position'] > y['position']:
return 1
elif x['position'] == y['position']:
return x['title'] > y['title']
else:
return -1
menus.sort(cmp_menu)
return {
'profile': settings.PROFILE,
'menus': menus,
}
| import os
def get_valid_plugins():
"""
Returns a list of valid webengine plugins.
Returns: [('name', <module 'webengine.name'>), ...]
"""
webengine = __import__('webengine')
def __isplugin(mod_name):
""" Nested method of get_valid_plugins, tries to import webengine.<mod_name>.urls. """
mod = None
try:
__import__('webengine.' + mod_name + '.urls', {}, {}, [''])
mod = getattr(webengine, mod_name)
except ImportError:
return None
return mod_name, mod
wdir = webengine.__path__[0]
# Map os.listdir(wdir) to isplugin, and then "filter" elements that are None
return [dir for dir in map(__isplugin, [d for d in os.listdir(wdir) if os.path.isdir(os.path.join(wdir, d))]) if dir]
def webengine_template_processor(request):
"""
This method is called by the RequestContext() object.
It adds to the template variables the profile, etc..
Each key in the returned dict will be available as is
when processing the template.
Add everything you need in every template.
"""
from django.conf import settings
modules = get_valid_plugins()
menus = []
for mod in modules:
try:
m = mod[1].urls.menus
menus.append(m)
except AttributeError:
continue
return {
'profile': settings.PROFILE,
'menus': menus,
}
| lgpl-2.1 | Python |
8067a67998273996d086f715748b2d7f09790bb0 | Use direct jpg data instead of jpg file | OTL/rostensorflow | image_recognition.py | image_recognition.py | import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import String
from cv_bridge import CvBridge
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.models.image.imagenet import classify_image
class RosTensorFlow():
def __init__(self):
classify_image.maybe_download_and_extract()
self._session = tf.Session()
classify_image.create_graph()
self._cv_bridge = CvBridge()
self._sub = rospy.Subscriber('image', Image, self.callback, queue_size=1)
self._pub = rospy.Publisher('result', String, queue_size=1)
self.score_threshold = rospy.get_param('~score_threshold', 0.1)
self.use_top_k = rospy.get_param('~use_top_k', 5)
def callback(self, image_msg):
cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
# copy from
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/image/imagenet/classify_image.py
image_data = cv2.imencode('.jpg', cv_image)[1].tostring()
# Creates graph from saved GraphDef.
softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')
predictions = self._session.run(
softmax_tensor, {'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = classify_image.NodeLookup()
top_k = predictions.argsort()[-self.use_top_k:][::-1]
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
if score > self.score_threshold:
rospy.loginfo('%s (score = %.5f)' % (human_string, score))
self._pub.publish(human_string)
def main(self):
rospy.spin()
if __name__ == '__main__':
rospy.init_node('rostensorflow')
tensor = RosTensorFlow()
tensor.main()
| import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import String
from cv_bridge import CvBridge, CvBridgeError
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.models.image.imagenet import classify_image
class RosTensorFlow():
def __init__(self):
classify_image.maybe_download_and_extract()
self._sub = rospy.Subscriber('image', Image, self.callback, queue_size=1)
self._pub = rospy.Publisher('result', String, queue_size=1)
def callback(self, image_msg):
bridge = CvBridge()
print 'start'
cv_image = bridge.imgmsg_to_cv2(image_msg, "bgr8")
cv2.imwrite('tmp.jpg', cv_image)
# copy from
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/image/imagenet/classify_image.py
image_data = tf.gfile.FastGFile('tmp.jpg', 'rb').read()
# Creates graph from saved GraphDef.
classify_image.create_graph()
with tf.Session() as sess:
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = classify_image.NodeLookup()
num_top_predictions = 5
top_k = predictions.argsort()[-num_top_predictions:][::-1]
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score))
self._pub.publish(human_string)
def main(self):
rospy.spin()
if __name__ == '__main__':
rospy.init_node('rostensorflow')
tensor = RosTensorFlow()
tensor.main()
| apache-2.0 | Python |
fb53986a1e20859e421f4c86c664fd862e3b882e | fix mongodb | shmiko/big-fat-python-tests,shmiko/big-fat-python-tests | mongodb_query.py | mongodb_query.py | db.schedule.find("bookings" : {"$elemMatch" : { "date" : new ISODate("2016-08-09T10:00:00.000Z")}})
>> { "bookings" : [
{
"event" : "MongoDB On Site Interveiw",
"date": ISODate("2016-08-09T10:00:00.000Z")
}
]
}
db.schedule.insert(
{ "bookings" : [
{
"event" : "MongoDB On Site Interveiw",
"date": ISODate("2016-08-09T10:00:00.000Z")
}
]
}
)
db.schedule.find()
{ "_id" : ObjectId("579af1b356bf8dd7f454ae3e"), "bookings" : [ { "event" : "MongoDB On Site Interveiw", "date" : ISODate("2016-08-09T10:00:00Z") } ] }
db.restaurants.insert(
{
"address" : {
"street" : "2 Avenue",
"zipcode" : "10075",
"building" : "1480",
"coord" : [ -73.9557413, 40.7720266 ]
},
"borough" : "Manhattan",
"cuisine" : "Italian",
"grades" : [
{
"date" : ISODate("2014-10-01T00:00:00Z"),
"grade" : "A",
"score" : 11
},
{
"date" : ISODate("2014-01-16T00:00:00Z"),
"grade" : "B",
"score" : 17
}
],
"name" : "Vella",
"restaurant_id" : "41704620"
}
)
| db.schedule.find("bookings" : {"$elemMatch" : { "date" : new ISODate("2016-08-09T10:00:00.000Z")}})
>> { "bookings" : [
{
"event" : "MongoDB On Site Interveiw",
"date": ISODate("2016-08-09T10:00:00.000Z")
}
]
}
db.schedule.insert(
{ "bookings" : [
{
"event" : "MongoDB On Site Interveiw",
"date": ISODate("2016-08-09T10:00:00.000Z")
}
]
}
)
db.schedule.find()
{ "_id" : ObjectId("579af1b356bf8dd7f454ae3e"), "bookings" : [ { "event" : "MongoDB On Site Interveiw", "date" : ISODate("2016-08-09T10:00:00Z") } ] }
db.restaurants.insert(
{
"address" : {
"street" : "2 Avenue",
"zipcode" : "10075",
"building" : "1480",
"coord" : [ -73.9557413, 40.7720266 ]
},
"borough" : "Manhattan",
"cuisine" : "Italian",
"grades" : [
{
"date" : ISODate("2014-10-01T00:00:00Z"),
"grade" : "A",
"score" : 11
},
{
"date" : ISODate("2014-01-16T00:00:00Z"),
"grade" : "B",
"score" : 17
}
],
"name" : "Vella",
"restaurant_id" : "41704620"
}
)
db.restaurants.insert(
{
"address" : {
"street" : "2 Avenue",
"zipcode" : "10075",
"building" : "1480",
"coord" : [ -73.9557413, 40.7720266 ]
},
"borough" : "Manhattan",
"cuisine" : "Italian",
"grades" : [
{
"date" : ISODate("2014-10-01T00:00:00Z"),
"grade" : "A",
"score" : 11
},
{
"date" : ISODate("2014-01-16T00:00:00Z"),
"grade" : "B",
"score" : 17
}
],
"name" : "Vella",
"restaurant_id" : "41704620"
}
)
| apache-2.0 | Python |
be80c525fbb1dfb9cc1ea6a713fc2b152dfd5440 | fix mongodb | shmiko/big-fat-python-tests,shmiko/big-fat-python-tests | mongodb_query.py | mongodb_query.py | db.schedule.find("bookings" : {"$elemMatch" : { "date" : new ISODate("2016-08-09T10:00:00.000Z")}})
>> { "bookings" : [
{
"event" : "MongoDB On Site Interveiw",
"date": ISODate("2016-08-09T10:00:00.000Z")
}
]
}
db.schedule.insert(
{ "bookings" : [
{
"event" : "MongoDB On Site Interveiw",
"date": ISODate("2016-08-09T10:00:00.000Z")
}
]
}
)
db.schedule.find()
{ "_id" : ObjectId("579af1b356bf8dd7f454ae3e"), "bookings" : [ { "event" : "MongoDB On Site Interveiw", "date" : ISODate("2016-08-09T10:00:00Z") } ] }
db.restaurants.insert(
{
"address" : {
"street" : "2 Avenue",
"zipcode" : "10075",
"building" : "1480",
"coord" : [ -73.9557413, 40.7720266 ]
},
"borough" : "Manhattan",
"cuisine" : "Italian",
"grades" : [
{
"date" : ISODate("2014-10-01T00:00:00Z"),
"grade" : "A",
"score" : 11
},
{
"date" : ISODate("2014-01-16T00:00:00Z"),
"grade" : "B",
"score" : 17
}
],
"name" : "Vella",
"restaurant_id" : "41704620"
}
)
| db.schedule.find("bookings" : {"$elemMatch" : { "date" : new ISODate("2016-08-09T10:00:00.000Z")}})
>> { "bookings" : [
{
"event" : "MongoDB On Site Interveiw",
"date": ISODate("2016-08-09T10:00:00.000Z")
}
]
}
db.schedule.insert(
{ "bookings" : [
{
"event" : "MongoDB On Site Interveiw",
"date": ISODate("2016-08-09T10:00:00.000Z")
}
]
}
)
db.schedule.find()
{ "_id" : ObjectId("579af1b356bf8dd7f454ae3e"), "bookings" : [ { "event" : "MongoDB On Site Interveiw", "date" : ISODate("2016-08-09T10:00:00Z") } ] }
| apache-2.0 | Python |
f27d51beecd423335835b005d2938224102a7460 | remove unused import | RaRe-Technologies/smart_open,piskvorky/smart_open,RaRe-Technologies/smart_open | integration-tests/test_s3_readline.py | integration-tests/test_s3_readline.py | from smart_open import open
def read_lines(url, limit):
lines = []
with open(url, 'r', errors='ignore') as fin:
for i, l in enumerate(fin):
if i == limit:
break
lines.append(l)
return lines
def test(benchmark):
#
# This file is around 850MB.
#
url = (
's3://commoncrawl/crawl-data/CC-MAIN-2019-51/segments/1575541319511.97'
'/warc/CC-MAIN-20191216093448-20191216121448-00559.warc.gz'
)
limit = 1000000
lines = benchmark(read_lines, url, limit)
assert len(lines) == limit
| import sys
from smart_open import open
def read_lines(url, limit):
lines = []
with open(url, 'r', errors='ignore') as fin:
for i, l in enumerate(fin):
if i == limit:
break
lines.append(l)
return lines
def test(benchmark):
#
# This file is around 850MB.
#
url = (
's3://commoncrawl/crawl-data/CC-MAIN-2019-51/segments/1575541319511.97'
'/warc/CC-MAIN-20191216093448-20191216121448-00559.warc.gz'
)
limit = 1000000
lines = benchmark(read_lines, url, limit)
assert len(lines) == limit
| mit | Python |
6482303cee3c6678cfed1f3a22dccd3758093c12 | Expand config to include jarvis data directories | clb6/jarvis-cli | jarvis_cli/config.py | jarvis_cli/config.py | import os
import configparser
from jarvis_cli.exceptions import JarvisCliConfigError
from jarvis_cli.client import DBConn
JARVIS_CLI_DIRECTORY = os.path.join(os.environ["HOME"], ".jarvis")
def _get_config(environment):
config = configparser.ConfigParser()
config_path = os.path.join(JARVIS_CLI_DIRECTORY, "cli_config.ini")
if config.read(config_path):
return config[environment]
else:
raise JarvisCliConfigError("Configuration not setup: {0}".format(config_path))
def get_client_connection(environment):
config = _get_config(environment)
return DBConn(config["host"], config["port"])
def get_jarvis_data_directory(environment):
config = _get_config(environment)
return config["data_directory"]
def get_jarvis_snapshots_directory(environment):
config = _get_config(environment)
return config["snapshots_directory"]
| import os
import configparser
from jarvis_cli.exceptions import JarvisCliConfigError
from jarvis_cli.client import DBConn
def get_client_connection(environment):
config = configparser.ConfigParser()
config_path = os.path.join(os.environ["HOME"], ".jarvis", "cli_config.ini")
if config.read(config_path):
return DBConn(config[environment]["host"], config[environment]["port"])
else:
raise JarvisCliConfigError("Configuration not setup: {0}".format(config_path))
| apache-2.0 | Python |
6acdeab4e0b6811ba4d7b03e2a00860a5783e20f | add shortcode to admin | praekelt/hellomama-registration,praekelt/hellomama-registration | vas2nets/admin.py | vas2nets/admin.py | from django.contrib import admin
from .models import VoiceCall
class VoiceCallAdmin(admin.ModelAdmin):
list_display = ['id', 'shortcode', 'created_at', 'msisdn', 'duration',
'reason']
admin.site.register(VoiceCall, VoiceCallAdmin)
| from django.contrib import admin
from .models import VoiceCall
class VoiceCallAdmin(admin.ModelAdmin):
list_display = ['id', 'created_at', 'msisdn', 'duration', 'reason']
admin.site.register(VoiceCall, VoiceCallAdmin)
| bsd-3-clause | Python |
99a2a7535478e77154670cc3a11832d7a5ac7d78 | Update version | globocom/vault,globocom/vault,globocom/vault,globocom/vault | vault/__init__.py | vault/__init__.py | # -*- coding: utf-8 -*-
default_app_config = 'vault.apps.VaultConfig'
__version__ = '1.1.1'
| # -*- coding: utf-8 -*-
default_app_config = 'vault.apps.VaultConfig'
__version__ = '1.1.0'
| apache-2.0 | Python |
9ccae8267d1884e25bf5268fe59b16e99475397f | Change hidden function | RuiShu/Neural-Net-Bayesian-Optimization,RuiShu/Neural-Net-Bayesian-Optimization,RuiShu/Neural-Net-Bayesian-Optimization | learning_objective/hidden_function.py | learning_objective/hidden_function.py | """
@Author: Rui Shu
@Date: 4/11/15
Provides a proxy hidden function for running of optimizer and mpi_optimizer
"""
import numpy as np
import time
noiseless_g = lambda x: 10*np.sin(x) - x
g = lambda x: noiseless_g(x) + np.random.randn()/10 # Define the hidden function
def evaluate(query, lim_domain):
""" Queries a single point with noise.
Keyword arguments:
query -- a (m,) array. Single point query in input space scaled to unit cube.
lim_domain -- a (2, m) array. Defines the search space boundaries of the
true input space
"""
var = (lim_domain[1, :] - lim_domain[0, :])/2.
mean = (lim_domain[1, :] + lim_domain[0, :])/2.
query = np.atleast_2d(query) # Convert to (1, m) array
X = query*var + mean # Scale query to true input space
Y = np.atleast_2d(g(X[0, 0])) # Compute output
dataset = np.concatenate((query, Y), axis=1)
# time.sleep(0.5)
return dataset
def true_evaluate(query, lim_domain):
""" Queries a single point without noise.
Keyword arguments:
query -- a (m,) array. Single point query in input space scaled to unit cube.
lim_domain -- a (2, m) array. Defines the search space boundaries of the
true input space
"""
var = (lim_domain[1, :] - lim_domain[0, :])/2.
mean = (lim_domain[1, :] + lim_domain[0, :])/2.
query = np.atleast_2d(query) # Convert to (1, m) array
X = query*var + mean # Scale query to true input space
Y = np.atleast_2d(noiseless_g(X[0, 0])) # Compute output
dataset = np.concatenate((query, Y), axis=1)
# time.sleep(0.5)
return dataset
| """
@Author: Rui Shu
@Date: 4/11/15
Provides a proxy hidden function for running of optimizer and mpi_optimizer
"""
import numpy as np
import time
noiseless_g = lambda x: 10*np.sin(x) - x
g = lambda x: noiseless_g(x) + np.random.randn()/10 # Define the hidden function
def evaluate(query, lim_domain):
""" Queries a single point with noise.
Keyword arguments:
query -- a (m,) array. Single point query in input space scaled to unit cube.
lim_domain -- a (2, m) array. Defines the search space boundaries of the
true input space
"""
var = (lim_domain[1, :] - lim_domain[0, :])/2.
mean = (lim_domain[1, :] + lim_domain[0, :])/2.
query = np.atleast_2d(query) # Convert to (1, m) array
X = query*var + mean # Scale query to true input space
Y = np.atleast_2d(g(X)[0, 0]) # Compute output
dataset = np.concatenate((query, Y), axis=1)
# time.sleep(0.5)
return dataset
def true_evaluate(query, lim_domain):
""" Queries a single point without noise.
Keyword arguments:
query -- a (m,) array. Single point query in input space scaled to unit cube.
lim_domain -- a (2, m) array. Defines the search space boundaries of the
true input space
"""
var = (lim_domain[1, :] - lim_domain[0, :])/2.
mean = (lim_domain[1, :] + lim_domain[0, :])/2.
query = np.atleast_2d(query) # Convert to (1, m) array
X = query*var + mean # Scale query to true input space
Y = np.atleast_2d(noiseless_g(X)[0, 0]) # Compute output
dataset = np.concatenate((query, Y), axis=1)
# time.sleep(0.5)
return dataset
| mit | Python |
ed09a238f8ff2925305e82ff310127fc3ba632e0 | make cache update nullable | django-leonardo/django-leonardo,django-leonardo/django-leonardo,amboycharlie/Child-Friendly-LCMS,amboycharlie/Child-Friendly-LCMS,amboycharlie/Child-Friendly-LCMS,django-leonardo/django-leonardo,django-leonardo/django-leonardo,amboycharlie/Child-Friendly-LCMS | leonardo/module/web/widgets/mixins.py | leonardo/module/web/widgets/mixins.py |
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .const import PAGINATION_CHOICES
class ListWidgetMixin(models.Model):
    """Common fields for object lists

    Abstract mixin providing layout and pagination settings for widgets
    that render collections of objects.
    """
    # How many objects a single page shows.
    objects_per_page = models.PositiveIntegerField(
        verbose_name=_('Objects per page'), blank=True, default=6)
    # How many objects are laid out side by side per row.
    objects_per_row = models.PositiveIntegerField(
        verbose_name=_('Objects per row'), blank=True, default=3)
    # NOTE(review): both fields below draw from PAGINATION_CHOICES --
    # confirm a separate style choice list was not intended here.
    pagination = models.CharField(
        verbose_name=_("Pagination"), max_length=50,
        choices=PAGINATION_CHOICES, default='paginator')
    pagination_style = models.CharField(
        verbose_name=_("Pagination Style"), max_length=50,
        choices=PAGINATION_CHOICES, default='paginator')

    class Meta:
        # Abstract: only contributes fields to concrete widget models.
        abstract = True
class ContentProxyWidgetMixin(models.Model):
    """Content proxy widget mixin

    Fields for widgets that fetch content from a remote address and
    cache it locally.
    """
    # Remote address the proxied content is fetched from.
    source_address = models.CharField(
        verbose_name=_("Source Address"), max_length=255)
    # Cache lifetime; presumably seconds (default one hour) -- confirm
    # against the code that consumes this field.
    cache_validity = models.PositiveIntegerField(
        verbose_name=_('Cache validity'), default=3600)
    # Managed internally (editable=False); nullable because no value
    # exists before the first cache refresh.
    cache_update = models.PositiveIntegerField(
        verbose_name=_('Cache update'), editable=False, null=True, blank=True)
    # Raw cached payload.
    cache_data = models.TextField(
        verbose_name=_("Cache data"), blank=True)

    class Meta:
        abstract = True
|
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .const import PAGINATION_CHOICES
class ListWidgetMixin(models.Model):
    """Common fields for object lists

    Abstract mixin providing layout and pagination settings for widgets
    that render collections of objects.
    """
    # Number of objects shown on one page.
    objects_per_page = models.PositiveIntegerField(
        verbose_name=_('Objects per page'), blank=True, default=6)
    # Number of objects placed side by side per row.
    objects_per_row = models.PositiveIntegerField(
        verbose_name=_('Objects per row'), blank=True, default=3)
    # NOTE(review): both fields below use PAGINATION_CHOICES -- confirm
    # a dedicated style choice list was not intended for the second one.
    pagination = models.CharField(
        verbose_name=_("Pagination"), max_length=50,
        choices=PAGINATION_CHOICES, default='paginator')
    pagination_style = models.CharField(
        verbose_name=_("Pagination Style"), max_length=50,
        choices=PAGINATION_CHOICES, default='paginator')

    class Meta:
        # Abstract: only contributes fields to concrete widget models.
        abstract = True
class ContentProxyWidgetMixin(models.Model):
    """Content proxy widget mixin

    Fields for widgets that fetch content from a remote address and
    cache it locally.
    """
    # Remote address the proxied content is fetched from.
    source_address = models.CharField(
        verbose_name=_("Source Address"), max_length=255)
    # Cache lifetime; presumably seconds (default one hour) -- confirm
    # against the code that consumes this field.
    cache_validity = models.PositiveIntegerField(
        verbose_name=_('Cache validity'), default=3600)
    # Managed internally (editable=False).  Must be nullable: the field
    # has no default and no form value, so saving a fresh instance would
    # otherwise fail before the first cache refresh sets it.
    cache_update = models.PositiveIntegerField(
        verbose_name=_('Cache update'), editable=False, null=True, blank=True)
    # Raw cached payload.
    cache_data = models.TextField(
        verbose_name=_("Cache data"), blank=True)

    class Meta:
        abstract = True
| bsd-3-clause | Python |
2e27306c09038dda3dd722282c9cf2645d13c113 | Fix fpm on Python2.5 | schatt/fpm,elvido/fpm,wyaeld/fpm,fetep/fpm,gearmover/fpm,djhaskin987/merge-demo,pskrz/fpm,rbramwell/fpm,josephfrazier/fpm,gearmover/fpm,gearmover/fpm,josephfrazier/fpm,josephfrazier/fpm,dischord01/fpm,tjyang/fpm,cturra/fpm,wyaeld/fpm,doghrim/fpm,hlawrenz/fpm,sideci-sample/sideci-sample-fpm,vi4m/fpm,josephfrazier/fpm,paul-krohn/fpm,hlawrenz/fpm,gearmover/fpm,pskrz/fpm,schatt/fpm,dischord01/fpm,godp1301/fpm,wyaeld/fpm,wyaeld/fpm,vi4m/fpm,mzaccari/fpm,doghrim/fpm,fetep/fpm,elvido/fpm,snailwalker/fpm,virtkick/fpm,johnbintz/fpm,rbramwell/fpm,joseph-onsip/fpm,jerizm/fpm,cturra/fpm,godp1301/fpm,hlawrenz/fpm,snailwalker/fpm,dischord01/fpm,johnbintz/fpm,doghrim/fpm,elvido/fpm,Tapjoy/fpm,mildred/fpm,cturra/fpm,cturra/fpm,godp1301/fpm,snailwalker/fpm,paul-krohn/fpm,hlawrenz/fpm,paul-krohn/fpm,elvido/fpm,joseph-onsip/fpm,mzaccari/fpm,tjyang/fpm,tjyang/fpm,vi4m/fpm,godp1301/fpm,mildred/fpm,fetep/fpm,rbramwell/fpm,mildred/fpm,mildred/fpm,djhaskin987/merge-demo,linearregression/fpm,mzaccari/fpm,schatt/fpm,vi4m/fpm,joseph-onsip/fpm,tjyang/fpm,jerizm/fpm,pskrz/fpm,pskrz/fpm,rbramwell/fpm,virtkick/fpm,linearregression/fpm,virtkick/fpm,sideci-sample/sideci-sample-fpm,Tapjoy/fpm,Tapjoy/fpm,joseph-onsip/fpm,schatt/fpm,paul-krohn/fpm,virtkick/fpm,johnbintz/fpm,jerizm/fpm,jerizm/fpm,sideci-sample/sideci-sample-fpm,fetep/fpm,mzaccari/fpm,djhaskin987/merge-demo,snailwalker/fpm,Tapjoy/fpm,linearregression/fpm,linearregression/fpm,dischord01/fpm,doghrim/fpm | lib/fpm/package/pyfpm/get_metadata.py | lib/fpm/package/pyfpm/get_metadata.py | from distutils.core import Command
import re
import time
import pkg_resources
try:
import json
except ImportError:
import simplejson as json
# Note, the last time I coded python daily was at Google, so it's entirely
# possible some of my techniques below are outdated or bad.
# If you have fixes, let me know.
class get_metadata(Command):
    """distutils command that prints this package's metadata as JSON.

    fpm runs ``setup.py get_metadata`` against a package and parses the
    JSON printed on stdout (name, version, author, description, license,
    url, architecture and dependency list) to build a system package.
    """
    description = "get package metadata"
    user_options = []

    def initialize_options(self):
        # Part of the distutils Command contract; no options to prepare.
        pass
    # def initialize_options

    def finalize_options(self):
        # Part of the distutils Command contract; no options to validate.
        pass
    # def finalize_options

    def run(self):
        """Collect metadata from self.distribution and print it as JSON."""
        #print type(self.distribution)
        #for i in sorted(dir(self.distribution)):
        #if i.startswith("_"):
        #continue
        ###print "%s: %r" % (i, self.__getattr__(i))
        #print "%s" % i
        data = {
            "name": self.distribution.get_name(),
            "version": self.distribution.get_version(),
            "author": "%s <%s>" % (self.distribution.get_author(),
                                   self.distribution.get_author_email()),
            "description": self.distribution.get_description(),
            "license": self.distribution.get_license(),
            "url": self.distribution.get_url(),
        }
        # If there are python C/extension modules, we'll want to build a native
        # arch package.
        if self.distribution.has_ext_modules():
            data["architecture"] = "native"
        else:
            data["architecture"] = "all"
        # end if
        final_deps = []
        if getattr(self.distribution, 'install_requires', None):
            for dep in pkg_resources.parse_requirements(self.distribution.install_requires):
                # add all defined specs to the dependecy list separately
                # as "name op version"; a bare requirement becomes just
                # the project name.
                if dep.specs:
                    for operator, version in dep.specs:
                        final_deps.append("%s %s %s" % (
                            dep.project_name,
                            "=" if operator == "==" else operator,
                            version
                        ))
                else:
                    final_deps.append(dep.project_name)
        data["dependencies"] = final_deps
        #print json.dumps(data, indent=2)
        # Feature-detect json.dumps: Python 2.5 with Debian's python-json
        # only provides json.write().
        if hasattr(json, 'dumps'):
            print(json.dumps(data, indent=2))
        else:
            # For Python 2.5 and Debian's python-json
            print(json.write(data))
    # def run
# class get_metadata
| from distutils.core import Command
import json
import re
import time
import pkg_resources
# Note, the last time I coded python daily was at Google, so it's entirely
# possible some of my techniques below are outdated or bad.
# If you have fixes, let me know.
class get_metadata(Command):
    """distutils command that prints this package's metadata as JSON.

    fpm runs ``setup.py get_metadata`` against a package and parses the
    JSON printed on stdout (name, version, author, description, license,
    url, architecture and dependency list) to build a system package.
    """
    description = "get package metadata"
    user_options = []

    def initialize_options(self):
        # Part of the distutils Command contract; no options to prepare.
        pass
    # def initialize_options

    def finalize_options(self):
        # Part of the distutils Command contract; no options to validate.
        pass
    # def finalize_options

    def run(self):
        """Collect metadata from self.distribution and print it as JSON."""
        data = {
            "name": self.distribution.get_name(),
            "version": self.distribution.get_version(),
            "author": "%s <%s>" % (self.distribution.get_author(),
                                   self.distribution.get_author_email()),
            "description": self.distribution.get_description(),
            "license": self.distribution.get_license(),
            "url": self.distribution.get_url(),
        }
        # If there are python C/extension modules, we'll want to build a
        # native arch package.
        if self.distribution.has_ext_modules():
            data["architecture"] = "native"
        else:
            data["architecture"] = "all"
        final_deps = []
        if getattr(self.distribution, 'install_requires', None):
            for dep in pkg_resources.parse_requirements(self.distribution.install_requires):
                # One "name op version" entry per version spec; a bare
                # requirement is emitted as just the project name.
                if dep.specs:
                    for operator, version in dep.specs:
                        final_deps.append("%s %s %s" % (
                            dep.project_name,
                            "=" if operator == "==" else operator,
                            version
                        ))
                else:
                    final_deps.append(dep.project_name)
        data["dependencies"] = final_deps
        # Feature-detect instead of "try/except AttributeError as e":
        # the "as" form is Python >= 2.6 syntax (this file must also run
        # on 2.5, whose Debian python-json offers json.write() rather
        # than json.dumps()), and the bound "e" was never used.
        if hasattr(json, 'dumps'):
            print(json.dumps(data, indent=2))
        else:
            # For Python 2.5 and Debian's python-json
            print(json.write(data))
    # def run
# class get_metadata
| mit | Python |
a79f6d6b07a77a11314d5c763905a048db2783a6 | remove obsolete state return feature | nominum/nomcc | nomcc/message.py | nomcc/message.py | # Copyright (C) 2011-2014,2016 Nominum, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def reply_to(request, request_type=None):
    """Build the skeleton of a reply to *request*.

    The reply is flagged with '_rpl', echoes the request's send sequence
    number as '_rseq', and propagates '_seq' when present.  The data
    section carries the request's type (or *request_type* if given).
    """
    ctrl = {'_rpl': b'1', '_rseq': request['_ctrl']['_sseq']}
    data = {}
    msg_type = request_type if request_type is not None \
        else request['_data'].get('type')
    if msg_type is not None:
        data['type'] = msg_type
    seq = request['_ctrl'].get('_seq')
    if seq is not None:
        ctrl['_seq'] = seq
    return {'_ctrl': ctrl, '_data': data}


def error(request, detail, request_type=None):
    """Build an error reply to *request* carrying *detail* under 'err'."""
    reply = reply_to(request, request_type)
    reply['_data']['err'] = detail
    return reply
def request(content):
    """Wrap *content* as a request message (empty control section)."""
    message = {'_ctrl': {}, '_data': content}
    return message


def event(content):
    """Wrap *content* as an event message ('_evt' control flag set)."""
    message = {'_ctrl': {'_evt': b'1'}, '_data': content}
    return message
def is_reply(message):
    """Return True when *message* carries the reply flag."""
    return '_rpl' in message['_ctrl']


def is_event(message):
    """Return True when *message* carries the event flag."""
    return '_evt' in message['_ctrl']


def is_request(message):
    """Return True when *message* is neither a reply nor an event."""
    ctrl = message['_ctrl']
    return '_rpl' not in ctrl and '_evt' not in ctrl
def kind(message):
    """Classify *message* as 'response', 'event' or 'request'."""
    ctrl = message['_ctrl']
    if '_rpl' in ctrl:
        return 'response'
    if '_evt' in ctrl:
        return 'event'
    return 'request'


# The closed set of values kind() can produce.
kinds = frozenset(('request', 'response', 'event'))
| # Copyright (C) 2011-2014,2016 Nominum, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def reply_to(request, request_type=None):
    """Build the skeleton of a reply to *request*.

    The reply is flagged with '_rpl', echoes the request's send sequence
    number as '_rseq', and propagates the '_seq' and '_state' control
    values when present.  The data section carries the request's type
    (or *request_type* if given).
    """
    ctrl = {}
    data = {}
    if request_type is None:
        msg_type = request['_data'].get('type')
    else:
        msg_type = request_type
    if msg_type is not None:
        data['type'] = msg_type
    ctrl['_rpl'] = b'1'
    ctrl['_rseq'] = request['_ctrl']['_sseq']
    # Copy optional control values straight through (state return).
    for key in ('_seq', '_state'):
        value = request['_ctrl'].get(key)
        if value is not None:
            ctrl[key] = value
    return {'_ctrl': ctrl, '_data': data}


def error(request, detail, request_type=None):
    """Build an error reply to *request* carrying *detail* under 'err'."""
    reply = reply_to(request, request_type)
    reply['_data']['err'] = detail
    return reply
def request(content):
    """Wrap *content* in a request envelope (no control flags)."""
    return {'_ctrl': {}, '_data': content}


def event(content):
    """Wrap *content* in an event envelope ('_evt' flag set)."""
    return {'_ctrl': {'_evt': b'1'}, '_data': content}
def is_reply(message):
    """Return True for messages flagged as replies."""
    return '_rpl' in message['_ctrl']


def is_event(message):
    """Return True for messages flagged as events."""
    return '_evt' in message['_ctrl']


def is_request(message):
    """Return True when the message carries neither flag."""
    flags = message['_ctrl']
    return not ('_rpl' in flags or '_evt' in flags)
def kind(message):
    """Map a message to one of the strings in ``kinds``."""
    flags = message['_ctrl']
    for flag, name in (('_rpl', 'response'), ('_evt', 'event')):
        if flag in flags:
            return name
    return 'request'


# Closed set of values kind() may return.
kinds = frozenset(('request', 'response', 'event'))
| apache-2.0 | Python |
762a4351c2d1552cd99a2ce90c8641d781eec259 | change for windows... | 4D42/pdfcropall | pdfcropall.py | pdfcropall.py | #!/usr/bin/env python3
import sys
import os
def for_os(filesname):
    """Quote/escape *filesname* for the current platform's shell.

    On Linux the shell metacharacters space, '(' and ')' are
    backslash-escaped; on Windows the whole name is wrapped in double
    quotes; other platforms return the name unchanged.

    NOTE(review): this hand-rolled escaping covers only three
    characters; shlex.quote would be more robust but would change the
    exact strings produced.
    """
    osname = sys.platform
    if osname == "linux":
        # Explicit double backslashes: the original '\ ', '\(' and '\)'
        # are invalid escape sequences, which newer Pythons warn about
        # and will eventually reject.
        for ch in (' ', '(', ')'):
            filesname = filesname.replace(ch, '\\' + ch)
    if osname == "win32":
        filesname = '"' + filesname + '"'
    return filesname
# Script entry point: crop every not-yet-cropped PDF in the current
# directory by shelling out to pdfcrop.
if __name__ == "__main__":
    print("Start")
    allthefiles = os.listdir()
    # Keep names containing ".pdf" exactly once.
    onlypdffiles = []
    for name in allthefiles:
        if name.count(".pdf")==1:
            onlypdffiles.append(name)
    # Drop files that are themselves pdfcrop output ("*-crop.pdf").
    onlynoncroppdf = []
    for name in onlypdffiles:
        if name.count("-crop.pdf") == 0:
            onlynoncroppdf.append(name)
    print("croping pdfs")
    for name in onlynoncroppdf:
        name = for_os(name)
        # NOTE(review): os.system interpolates the file name into a
        # shell command; for_os escapes only a few characters, so other
        # shell metacharacters in file names are unsafe here.
        os.system("pdfcrop "+name)
    print("Done")
    input("Press Enter to finish")
| #!/usr/bin/env python3
import sys
import os
def for_os(filesname):
    """Escape *filesname* for the current platform's shell.

    On Linux the shell metacharacters space, '(' and ')' are
    backslash-escaped; other platforms (win32 quoting is not implemented
    yet) return the name unchanged.
    """
    osname = sys.platform
    if osname == "linux":
        # Explicit double backslashes: the original '\ ', '\(' and '\)'
        # are invalid escape sequences, which newer Pythons warn about
        # and will eventually reject.
        for ch in (' ', '(', ')'):
            filesname = filesname.replace(ch, '\\' + ch)
    return filesname
# Script entry point: crop every not-yet-cropped PDF in the current
# directory by shelling out to pdfcrop.
if __name__ == "__main__":
    print("Start")
    allthefiles = os.listdir()
    # Keep names containing ".pdf" exactly once.
    onlypdffiles = []
    for name in allthefiles:
        if name.count(".pdf")==1:
            onlypdffiles.append(name)
    # Drop files that are themselves pdfcrop output ("*-crop.pdf").
    onlynoncroppdf = []
    for name in onlypdffiles:
        if name.count("-crop.pdf") == 0:
            onlynoncroppdf.append(name)
    print("croping pdfs")
    for name in onlynoncroppdf:
        name = for_os(name)
        # NOTE(review): os.system interpolates the file name into a
        # shell command; for_os escapes only a few characters, so other
        # shell metacharacters in file names are unsafe here.
        os.system("pdfcrop "+name)
    print("Done")
    input("Press Enter to finish")
| mit | Python |
b77b030908a6c589411224912f9a5e5e0e090829 | Make printf %-safe | Nazek42/wistful-c | wistfulc.py | wistfulc.py | sample_code = """\
if only <stdlib.h> were included...
if only <stdio.h> were included...
if only <math.h> were included...
if only int x were 3...
if x were 3...
wish "Hello World" upon a star
*sigh*
if wishes were horses..."""
import re
from functools import reduce
import subprocess
import os
import sys
def add_to_includes(matchobj):
    """re.sub callback: record an '#include <...>' and drop the line.

    Returns the empty string so re.sub removes the matched source text;
    the original implicitly returned None, which makes re.sub raise
    TypeError whenever an include line is compiled.
    """
    includes.append("#include " + matchobj.group(1))
    return ""


# Ordered (pattern, replacement) pairs translating Wistful into C.
# Order matters: specific phrases must be rewritten before the generic
# "if ... ..." rule.
SUBS = [
    (r"if wishes were horses\.\.\.", "exit(0);"),
    (r'wish for (.*?) upon a star', r"scanf(\1);"),
    # Print through "%s" so '%' characters in the wished-for string are
    # not interpreted as printf conversion specifiers.
    (r"wish (.*?) upon a star", r'printf("%s", \1);'),
    (r"if only (<.*?>) were included\.\.\.", add_to_includes),
    (r"if only ([\w *]+) were (.*?)\.\.\.", r"\1 = \2;"),
    (r"if only I could return (.*?)\.\.\.", r"return \1;"),
    (r"\*sigh\*", "}"),
    (r"if (.*?) ?\.\.\.", r"if(\1) {"),
    (r"wait for (.*?) to change\.\.\.", r"while(\1) {"),
    (r"someday (.*?) ?\.\.\.", r"while(!(\1)) {"),
    (r" +were +", r" == "),
    (r" +will be +", r" == "),
]

# Headers collected by add_to_includes; stdbool is always emitted.
includes = ["#include <stdbool.h>"]


def compile_code(code):
    """Translate Wistful *code* into a complete C translation unit."""
    global includes
    compiled = reduce(lambda code, sub: re.sub(sub[0], sub[1], code), SUBS, code)
    return "\n".join(includes) + "\nint main() {" + compiled + "}"
def run_code(compiled):
    """Write *compiled* C to a temp file, build it with gcc, and run it.

    Temp file names include the pid to avoid collisions between
    concurrent runs; both the source and the binary are removed after
    execution.
    """
    exepath = "tempsigh" + str(os.getpid())
    srcpath = exepath + ".c"
    with open(srcpath, "wt") as source_file:
        source_file.write(compiled)
    # NOTE(review): gcc's exit status is not checked -- if compilation
    # fails, running and removing the binary below will error out.
    subprocess.call(["gcc", "-w", "-o", exepath, srcpath])
    subprocess.call(["./" + exepath])
    os.remove(exepath)
    os.remove(srcpath)

# Entry point: compile and run the Wistful source file named on the
# command line.
if __name__ == "__main__":
    with open(sys.argv[1]) as code_file:
        run_code(compile_code(code_file.read()))
| sample_code = """\
if only <stdlib.h> were included...
if only <stdio.h> were included...
if only <math.h> were included...
if only int x were 3...
if x were 3...
wish "Hello World" upon a star
*sigh*
if wishes were horses..."""
import re
from functools import reduce
import subprocess
import os
import sys
def add_to_includes(matchobj):
    """re.sub callback: record an '#include <...>' and drop the line.

    Returns the empty string so re.sub removes the matched source text;
    the original implicitly returned None, which makes re.sub raise
    TypeError whenever an include line is compiled.
    """
    includes.append("#include " + matchobj.group(1))
    return ""


# Ordered (pattern, replacement) pairs translating Wistful into C.
# Order matters: specific phrases must be rewritten before the generic
# "if ... ..." rule.
SUBS = [
    (r"if wishes were horses\.\.\.", "exit(0);"),
    (r'wish for (.*?) upon a star', r"scanf(\1);"),
    # Print through "%s" rather than passing the wished-for string as
    # the format: printf(\1) lets '%' characters act as conversion
    # specifiers (classic format-string bug).
    (r"wish (.*?) upon a star", r'printf("%s", \1);'),
    (r"if only (<.*?>) were included\.\.\.", add_to_includes),
    (r"if only ([\w *]+) were (.*?)\.\.\.", r"\1 = \2;"),
    (r"if only I could return (.*?)\.\.\.", r"return \1;"),
    (r"\*sigh\*", "}"),
    (r"if (.*?) ?\.\.\.", r"if(\1) {"),
    (r"wait for (.*?) to change\.\.\.", r"while(\1) {"),
    (r"someday (.*?) ?\.\.\.", r"while(!(\1)) {"),
    (r" +were +", r" == "),
    (r" +will be +", r" == "),
]

# Headers collected by add_to_includes; stdbool is always emitted.
includes = ["#include <stdbool.h>"]


def compile_code(code):
    """Translate Wistful *code* into a complete C translation unit."""
    global includes
    compiled = reduce(lambda code, sub: re.sub(sub[0], sub[1], code), SUBS, code)
    return "\n".join(includes) + "\nint main() {" + compiled + "}"
def run_code(compiled):
    """Write *compiled* C to a temp file, build it with gcc, and run it.

    Temp file names include the pid to avoid collisions between
    concurrent runs; both the source and the binary are removed after
    execution.
    """
    exepath = "tempsigh" + str(os.getpid())
    srcpath = exepath + ".c"
    with open(srcpath, "wt") as source_file:
        source_file.write(compiled)
    # NOTE(review): gcc's exit status is not checked -- if compilation
    # fails, running and removing the binary below will error out.
    subprocess.call(["gcc", "-w", "-o", exepath, srcpath])
    subprocess.call(["./" + exepath])
    os.remove(exepath)
    os.remove(srcpath)

# Entry point: compile and run the Wistful source file named on the
# command line.
if __name__ == "__main__":
    with open(sys.argv[1]) as code_file:
        run_code(compile_code(code_file.read()))
| apache-2.0 | Python |
847a66ed8eb19206ecc77904dd5db547284b905f | Make sure exit code is used in -E situation | mindw/pip,pjdelport/pip,patricklaw/pip,alquerci/pip,esc/pip,harrisonfeng/pip,haridsv/pip,prasaianooz/pip,habnabit/pip,ncoghlan/pip,blarghmatey/pip,h4ck3rm1k3/pip,msabramo/pip,Gabriel439/pip,alex/pip,xavfernandez/pip,zvezdan/pip,haridsv/pip,Ivoz/pip,cjerdonek/pip,yati-sagade/pip,harrisonfeng/pip,RonnyPfannschmidt/pip,alex/pip,sigmavirus24/pip,erikrose/pip,qwcode/pip,pjdelport/pip,patricklaw/pip,zvezdan/pip,habnabit/pip,domenkozar/pip,willingc/pip,squidsoup/pip,erikrose/pip,pypa/pip,pfmoore/pip,jmagnusson/pip,mindw/pip,minrk/pip,prasaianooz/pip,davidovich/pip,Carreau/pip,jythontools/pip,techtonik/pip,luzfcb/pip,sbidoul/pip,zenlambda/pip,supriyantomaftuh/pip,blarghmatey/pip,h4ck3rm1k3/pip,pjdelport/pip,xavfernandez/pip,luzfcb/pip,pradyunsg/pip,habnabit/pip,RonnyPfannschmidt/pip,rbtcollins/pip,RonnyPfannschmidt/pip,pypa/pip,yati-sagade/pip,supriyantomaftuh/pip,mindw/pip,pradyunsg/pip,alquerci/pip,rouge8/pip,prasaianooz/pip,jamezpolley/pip,tdsmith/pip,mujiansu/pip,ianw/pip,zvezdan/pip,erikrose/pip,qbdsoft/pip,mujiansu/pip,radiosilence/pip,James-Firth/pip,jasonkying/pip,fiber-space/pip,KarelJakubec/pip,natefoo/pip,qbdsoft/pip,sbidoul/pip,xavfernandez/pip,msabramo/pip,rbtcollins/pip,esc/pip,atdaemon/pip,natefoo/pip,sigmavirus24/pip,willingc/pip,rbtcollins/pip,dstufft/pip,zorosteven/pip,supriyantomaftuh/pip,atdaemon/pip,jamezpolley/pip,tdsmith/pip,ncoghlan/pip,ChristopherHogan/pip,ChristopherHogan/pip,jmagnusson/pip,chaoallsome/pip,luzfcb/pip,squidsoup/pip,wkeyword/pip,jamezpolley/pip,zorosteven/pip,qbdsoft/pip,wkeyword/pip,fiber-space/pip,ianw/pip,esc/pip,wkeyword/pip,davidovich/pip,qwcode/pip,haridsv/pip,graingert/pip,nthall/pip,Carreau/pip,dstufft/pip,alex/pip,Gabriel439/pip,caosmo/pip,ncoghlan/pip,graingert/pip,tdsmith/pip,sigmavirus24/pip,harrisonfeng/pip,graingert/pip,techtonik/pip,blarghmatey/pip,dstufft/pip,rouge8/pip,squidsoup/pip,jythontools/pip,minrk/pip,benesch/pip,davidovic
h/pip,caosmo/pip,willingc/pip,ChristopherHogan/pip,atdaemon/pip,nthall/pip,h4ck3rm1k3/pip,Ivoz/pip,mujiansu/pip,James-Firth/pip,benesch/pip,yati-sagade/pip,jmagnusson/pip,benesch/pip,techtonik/pip,jasonkying/pip,mattrobenolt/pip,KarelJakubec/pip,jythontools/pip,natefoo/pip,mattrobenolt/pip,caosmo/pip,cjerdonek/pip,pfmoore/pip,KarelJakubec/pip,fiber-space/pip,rouge8/pip,Gabriel439/pip,nthall/pip,zenlambda/pip,zenlambda/pip,chaoallsome/pip,chaoallsome/pip,zorosteven/pip,jasonkying/pip,James-Firth/pip | pip/runner.py | pip/runner.py | import sys
import os
def run():
    """Put the project root on sys.path, then dispatch to pip.main().

    Returns whatever pip.main() returns (its exit status).
    """
    # Two levels up from this file: <root>/pip/runner.py -> <root>.
    project_root = os.path.dirname(
        os.path.dirname(os.path.abspath(__file__)))
    ## FIXME: this is kind of crude; a fake pip module with a proper
    ## __path__ would avoid mutating sys.path here.
    sys.path.insert(0, project_root)
    import pip
    return pip.main()
# Script entry point: run pip and propagate its exit status to the
# shell.  Named 'status' rather than 'exit' so the 'exit' builtin is not
# shadowed.
if __name__ == '__main__':
    status = run()
    if status:
        sys.exit(status)
| import sys
import os
def run():
    """Make the project root importable and hand control to pip.main()."""
    here = os.path.abspath(__file__)
    root = os.path.dirname(os.path.dirname(here))
    ## FIXME: this is kind of crude; building a fake pip module and
    ## fixing up pip.__path__ would avoid touching sys.path:
    sys.path.insert(0, root)
    import pip
    return pip.main()
# Script entry point.  Propagate pip's exit status instead of discarding
# run()'s return value, so failures are visible to the calling shell
# (e.g. when re-executed under python -E).
if __name__ == '__main__':
    status = run()
    if status:
        sys.exit(status)
| mit | Python |
d854e19f4a1c6e5b2ae4007152bbd962fb851642 | Update version number for trunk to 1.4.0. | jankoslavic/numpy,felipebetancur/numpy,jschueller/numpy,bringingheavendown/numpy,mhvk/numpy,SiccarPoint/numpy,BabeNovelty/numpy,Linkid/numpy,mortada/numpy,simongibbons/numpy,Srisai85/numpy,mathdd/numpy,dwillmer/numpy,Srisai85/numpy,sigma-random/numpy,tacaswell/numpy,bringingheavendown/numpy,behzadnouri/numpy,behzadnouri/numpy,gmcastil/numpy,ahaldane/numpy,Dapid/numpy,tynn/numpy,pelson/numpy,kirillzhuravlev/numpy,skymanaditya1/numpy,endolith/numpy,BabeNovelty/numpy,dwillmer/numpy,drasmuss/numpy,tdsmith/numpy,SunghanKim/numpy,felipebetancur/numpy,naritta/numpy,SunghanKim/numpy,KaelChen/numpy,ahaldane/numpy,solarjoe/numpy,GrimDerp/numpy,Anwesh43/numpy,ewmoore/numpy,trankmichael/numpy,BMJHayward/numpy,rgommers/numpy,NextThought/pypy-numpy,leifdenby/numpy,jorisvandenbossche/numpy,ddasilva/numpy,gfyoung/numpy,pbrod/numpy,immerrr/numpy,immerrr/numpy,GaZ3ll3/numpy,sigma-random/numpy,dato-code/numpy,mortada/numpy,matthew-brett/numpy,larsmans/numpy,CMartelLML/numpy,ewmoore/numpy,grlee77/numpy,ekalosak/numpy,bmorris3/numpy,MaPePeR/numpy,Eric89GXL/numpy,SiccarPoint/numpy,WarrenWeckesser/numpy,MichaelAquilina/numpy,stefanv/numpy,rmcgibbo/numpy,WillieMaddox/numpy,pdebuyl/numpy,moreati/numpy,numpy/numpy,charris/numpy,ChanderG/numpy,embray/numpy,musically-ut/numpy,madphysicist/numpy,ahaldane/numpy,charris/numpy,dwf/numpy,naritta/numpy,stefanv/numpy,SiccarPoint/numpy,gmcastil/numpy,endolith/numpy,rgommers/numpy,gfyoung/numpy,chiffa/numpy,pdebuyl/numpy,ewmoore/numpy,leifdenby/numpy,empeeu/numpy,MSeifert04/numpy,felipebetancur/numpy,Dapid/numpy,groutr/numpy,mingwpy/numpy,kirillzhuravlev/numpy,pyparallel/numpy,argriffing/numpy,BMJHayward/numpy,grlee77/numpy,BabeNovelty/numpy,jschueller/numpy,NextThought/pypy-numpy,seberg/numpy,GaZ3ll3/numpy,Eric89GXL/numpy,Linkid/numpy,mindw/numpy,GrimDerp/numpy,Srisai85/numpy,Anwesh43/numpy,numpy/numpy-refactor,embray/numpy,rherault-insa/numpy,mindw/numpy,sonnyhu/nu
mpy,ChristopherHogan/numpy,SiccarPoint/numpy,hainm/numpy,pbrod/numpy,dimasad/numpy,numpy/numpy-refactor,grlee77/numpy,pyparallel/numpy,mortada/numpy,ssanderson/numpy,numpy/numpy,hainm/numpy,MSeifert04/numpy,sonnyhu/numpy,rudimeier/numpy,MaPePeR/numpy,mattip/numpy,astrofrog/numpy,anntzer/numpy,chiffa/numpy,charris/numpy,jakirkham/numpy,mingwpy/numpy,pelson/numpy,bmorris3/numpy,madphysicist/numpy,ddasilva/numpy,AustereCuriosity/numpy,chatcannon/numpy,maniteja123/numpy,brandon-rhodes/numpy,rgommers/numpy,seberg/numpy,BabeNovelty/numpy,nguyentu1602/numpy,ContinuumIO/numpy,pbrod/numpy,madphysicist/numpy,mwiebe/numpy,abalkin/numpy,ChanderG/numpy,astrofrog/numpy,jorisvandenbossche/numpy,dimasad/numpy,rudimeier/numpy,MichaelAquilina/numpy,skymanaditya1/numpy,joferkington/numpy,Yusa95/numpy,ahaldane/numpy,dimasad/numpy,kirillzhuravlev/numpy,githubmlai/numpy,skwbc/numpy,rajathkumarmp/numpy,empeeu/numpy,kiwifb/numpy,ChristopherHogan/numpy,mwiebe/numpy,matthew-brett/numpy,Anwesh43/numpy,pizzathief/numpy,pdebuyl/numpy,andsor/numpy,simongibbons/numpy,numpy/numpy,mingwpy/numpy,AustereCuriosity/numpy,WarrenWeckesser/numpy,dato-code/numpy,argriffing/numpy,andsor/numpy,ChristopherHogan/numpy,felipebetancur/numpy,rhythmsosad/numpy,ddasilva/numpy,WillieMaddox/numpy,b-carter/numpy,skwbc/numpy,rajathkumarmp/numpy,utke1/numpy,andsor/numpy,sigma-random/numpy,astrofrog/numpy,shoyer/numpy,shoyer/numpy,numpy/numpy-refactor,utke1/numpy,stuarteberg/numpy,ewmoore/numpy,tdsmith/numpy,CMartelLML/numpy,ViralLeadership/numpy,utke1/numpy,behzadnouri/numpy,Linkid/numpy,trankmichael/numpy,mattip/numpy,nbeaver/numpy,CMartelLML/numpy,mathdd/numpy,nguyentu1602/numpy,stefanv/numpy,dwf/numpy,musically-ut/numpy,jorisvandenbossche/numpy,drasmuss/numpy,jakirkham/numpy,cowlicks/numpy,dch312/numpy,anntzer/numpy,dch312/numpy,ESSS/numpy,hainm/numpy,groutr/numpy,skymanaditya1/numpy,numpy/numpy,musically-ut/numpy,moreati/numpy,grlee77/numpy,immerrr/numpy,ahaldane/numpy,mortada/numpy,madphysicist/numpy,chiffa/numpy,y
iakwy/numpy,joferkington/numpy,endolith/numpy,rmcgibbo/numpy,kiwifb/numpy,ssanderson/numpy,larsmans/numpy,b-carter/numpy,tacaswell/numpy,cjermain/numpy,has2k1/numpy,argriffing/numpy,ekalosak/numpy,mhvk/numpy,pbrod/numpy,chatcannon/numpy,ChristopherHogan/numpy,MaPePeR/numpy,ESSS/numpy,mathdd/numpy,KaelChen/numpy,pizzathief/numpy,astrofrog/numpy,anntzer/numpy,BMJHayward/numpy,solarjoe/numpy,KaelChen/numpy,cowlicks/numpy,Yusa95/numpy,jonathanunderwood/numpy,matthew-brett/numpy,jakirkham/numpy,immerrr/numpy,ajdawson/numpy,ogrisel/numpy,embray/numpy,ssanderson/numpy,cjermain/numpy,cowlicks/numpy,embray/numpy,sinhrks/numpy,endolith/numpy,dch312/numpy,tdsmith/numpy,ChanderG/numpy,sinhrks/numpy,rhythmsosad/numpy,Eric89GXL/numpy,mhvk/numpy,ajdawson/numpy,has2k1/numpy,stefanv/numpy,jorisvandenbossche/numpy,njase/numpy,bmorris3/numpy,SunghanKim/numpy,dwf/numpy,mattip/numpy,joferkington/numpy,matthew-brett/numpy,shoyer/numpy,MSeifert04/numpy,maniteja123/numpy,bertrand-l/numpy,stefanv/numpy,jorisvandenbossche/numpy,empeeu/numpy,larsmans/numpy,grlee77/numpy,seberg/numpy,solarjoe/numpy,bmorris3/numpy,njase/numpy,dwf/numpy,groutr/numpy,bertrand-l/numpy,ekalosak/numpy,ajdawson/numpy,ekalosak/numpy,trankmichael/numpy,WarrenWeckesser/numpy,moreati/numpy,sonnyhu/numpy,brandon-rhodes/numpy,ogrisel/numpy,shoyer/numpy,stuarteberg/numpy,mhvk/numpy,ContinuumIO/numpy,brandon-rhodes/numpy,BMJHayward/numpy,bringingheavendown/numpy,yiakwy/numpy,abalkin/numpy,rhythmsosad/numpy,rajathkumarmp/numpy,WillieMaddox/numpy,skwbc/numpy,drasmuss/numpy,pelson/numpy,mingwpy/numpy,madphysicist/numpy,jankoslavic/numpy,skymanaditya1/numpy,AustereCuriosity/numpy,MichaelAquilina/numpy,tdsmith/numpy,matthew-brett/numpy,tynn/numpy,anntzer/numpy,jankoslavic/numpy,mattip/numpy,sonnyhu/numpy,sinhrks/numpy,abalkin/numpy,trankmichael/numpy,NextThought/pypy-numpy,empeeu/numpy,MSeifert04/numpy,simongibbons/numpy,hainm/numpy,naritta/numpy,ContinuumIO/numpy,githubmlai/numpy,njase/numpy,Yusa95/numpy,tacaswell/numpy,ogrisel/
numpy,maniteja123/numpy,pizzathief/numpy,SunghanKim/numpy,kiwifb/numpy,ChanderG/numpy,b-carter/numpy,sinhrks/numpy,rherault-insa/numpy,MSeifert04/numpy,CMartelLML/numpy,astrofrog/numpy,GaZ3ll3/numpy,pizzathief/numpy,stuarteberg/numpy,mwiebe/numpy,numpy/numpy-refactor,shoyer/numpy,rajathkumarmp/numpy,Linkid/numpy,ewmoore/numpy,simongibbons/numpy,andsor/numpy,brandon-rhodes/numpy,Dapid/numpy,nbeaver/numpy,gmcastil/numpy,ESSS/numpy,ogrisel/numpy,nbeaver/numpy,ViralLeadership/numpy,GrimDerp/numpy,rudimeier/numpy,bertrand-l/numpy,cjermain/numpy,stuarteberg/numpy,Anwesh43/numpy,cjermain/numpy,simongibbons/numpy,jonathanunderwood/numpy,NextThought/pypy-numpy,Yusa95/numpy,rmcgibbo/numpy,GrimDerp/numpy,rherault-insa/numpy,cowlicks/numpy,dimasad/numpy,MaPePeR/numpy,yiakwy/numpy,sigma-random/numpy,rudimeier/numpy,ajdawson/numpy,mhvk/numpy,githubmlai/numpy,dwillmer/numpy,dch312/numpy,chatcannon/numpy,nguyentu1602/numpy,pelson/numpy,WarrenWeckesser/numpy,leifdenby/numpy,larsmans/numpy,jakirkham/numpy,joferkington/numpy,musically-ut/numpy,GaZ3ll3/numpy,rgommers/numpy,Eric89GXL/numpy,rhythmsosad/numpy,ViralLeadership/numpy,KaelChen/numpy,jonathanunderwood/numpy,naritta/numpy,mathdd/numpy,nguyentu1602/numpy,embray/numpy,pyparallel/numpy,jschueller/numpy,gfyoung/numpy,dwillmer/numpy,Srisai85/numpy,seberg/numpy,dwf/numpy,pizzathief/numpy,yiakwy/numpy,dato-code/numpy,githubmlai/numpy,rmcgibbo/numpy,MichaelAquilina/numpy,jakirkham/numpy,mindw/numpy,numpy/numpy-refactor,charris/numpy,has2k1/numpy,mindw/numpy,WarrenWeckesser/numpy,kirillzhuravlev/numpy,has2k1/numpy,jschueller/numpy,dato-code/numpy,pelson/numpy,ogrisel/numpy,jankoslavic/numpy,pbrod/numpy,pdebuyl/numpy,tynn/numpy | numpy/version.py | numpy/version.py | version='1.4.0'
release = False
if not release:
    # Development build: append '.dev' plus the SVN revision, if the
    # build step generated core/__svn_version__.py.
    version += '.dev'
    import os
    svn_version_file = os.path.join(os.path.dirname(__file__),
                                    'core', '__svn_version__.py')
    if os.path.isfile(svn_version_file):
        import imp
        # imp.load_module does not close the file it is given; open it
        # explicitly so the handle can be released (the original leaked
        # a bare open()).
        fp = open(svn_version_file)
        try:
            svn = imp.load_module('numpy.core.__svn_version__',
                                  fp, svn_version_file, ('.py', 'U', 1))
        finally:
            fp.close()
        version += svn.version
| version='1.3.0'
release = False
if not release:
    # Development build: append '.dev' plus the SVN revision, if the
    # build step generated core/__svn_version__.py.
    version += '.dev'
    import os
    svn_version_file = os.path.join(os.path.dirname(__file__),
                                    'core', '__svn_version__.py')
    if os.path.isfile(svn_version_file):
        import imp
        # imp.load_module does not close the file it is given; open it
        # explicitly so the handle can be released (the original leaked
        # a bare open()).
        fp = open(svn_version_file)
        try:
            svn = imp.load_module('numpy.core.__svn_version__',
                                  fp, svn_version_file, ('.py', 'U', 1))
        finally:
            fp.close()
        version += svn.version
| bsd-3-clause | Python |
779dc154b799a6660f7f60ef50c09fc445329999 | Update version to 1.0b3 | rhythmsosad/numpy,kirillzhuravlev/numpy,stefanv/numpy,endolith/numpy,ESSS/numpy,dch312/numpy,rgommers/numpy,pbrod/numpy,brandon-rhodes/numpy,tdsmith/numpy,mhvk/numpy,KaelChen/numpy,MSeifert04/numpy,empeeu/numpy,SunghanKim/numpy,CMartelLML/numpy,mortada/numpy,matthew-brett/numpy,Eric89GXL/numpy,pbrod/numpy,shoyer/numpy,ajdawson/numpy,ogrisel/numpy,jakirkham/numpy,ekalosak/numpy,stuarteberg/numpy,cowlicks/numpy,astrofrog/numpy,Anwesh43/numpy,kirillzhuravlev/numpy,ekalosak/numpy,tynn/numpy,ViralLeadership/numpy,shoyer/numpy,mwiebe/numpy,Yusa95/numpy,ContinuumIO/numpy,MSeifert04/numpy,sinhrks/numpy,madphysicist/numpy,Dapid/numpy,sonnyhu/numpy,WillieMaddox/numpy,cowlicks/numpy,abalkin/numpy,rherault-insa/numpy,andsor/numpy,pizzathief/numpy,immerrr/numpy,seberg/numpy,cjermain/numpy,simongibbons/numpy,githubmlai/numpy,skymanaditya1/numpy,Anwesh43/numpy,grlee77/numpy,groutr/numpy,ChristopherHogan/numpy,Srisai85/numpy,rmcgibbo/numpy,empeeu/numpy,gfyoung/numpy,nbeaver/numpy,shoyer/numpy,astrofrog/numpy,NextThought/pypy-numpy,dimasad/numpy,rmcgibbo/numpy,rmcgibbo/numpy,MichaelAquilina/numpy,astrofrog/numpy,pizzathief/numpy,kirillzhuravlev/numpy,b-carter/numpy,embray/numpy,Linkid/numpy,dwf/numpy,dch312/numpy,b-carter/numpy,rgommers/numpy,stuarteberg/numpy,ajdawson/numpy,felipebetancur/numpy,jorisvandenbossche/numpy,numpy/numpy-refactor,gmcastil/numpy,GrimDerp/numpy,jankoslavic/numpy,maniteja123/numpy,Srisai85/numpy,dato-code/numpy,seberg/numpy,seberg/numpy,numpy/numpy,Eric89GXL/numpy,behzadnouri/numpy,immerrr/numpy,simongibbons/numpy,dimasad/numpy,sigma-random/numpy,NextThought/pypy-numpy,BabeNovelty/numpy,MSeifert04/numpy,kiwifb/numpy,tynn/numpy,pyparallel/numpy,WillieMaddox/numpy,leifdenby/numpy,chatcannon/numpy,jorisvandenbossche/numpy,mattip/numpy,kirillzhuravlev/numpy,dimasad/numpy,skwbc/numpy,ViralLeadership/numpy,hainm/numpy,moreati/numpy,WarrenWeckesser/numpy,ssanderson/numpy,MaPePeR/numpy,cjermain/num
py,larsmans/numpy,naritta/numpy,rajathkumarmp/numpy,groutr/numpy,GrimDerp/numpy,has2k1/numpy,Yusa95/numpy,ddasilva/numpy,ChanderG/numpy,ewmoore/numpy,nguyentu1602/numpy,stefanv/numpy,mortada/numpy,skymanaditya1/numpy,embray/numpy,abalkin/numpy,rherault-insa/numpy,gmcastil/numpy,mattip/numpy,Dapid/numpy,ssanderson/numpy,BabeNovelty/numpy,maniteja123/numpy,musically-ut/numpy,jankoslavic/numpy,dwillmer/numpy,WarrenWeckesser/numpy,dwillmer/numpy,naritta/numpy,AustereCuriosity/numpy,ahaldane/numpy,nguyentu1602/numpy,drasmuss/numpy,dch312/numpy,WillieMaddox/numpy,yiakwy/numpy,mattip/numpy,CMartelLML/numpy,seberg/numpy,groutr/numpy,dimasad/numpy,ewmoore/numpy,KaelChen/numpy,dwf/numpy,nguyentu1602/numpy,rhythmsosad/numpy,chiffa/numpy,mindw/numpy,AustereCuriosity/numpy,has2k1/numpy,tdsmith/numpy,numpy/numpy,njase/numpy,jakirkham/numpy,Anwesh43/numpy,ddasilva/numpy,AustereCuriosity/numpy,chiffa/numpy,WarrenWeckesser/numpy,mwiebe/numpy,felipebetancur/numpy,rhythmsosad/numpy,WarrenWeckesser/numpy,sonnyhu/numpy,stefanv/numpy,matthew-brett/numpy,felipebetancur/numpy,rudimeier/numpy,anntzer/numpy,mathdd/numpy,Srisai85/numpy,larsmans/numpy,mortada/numpy,simongibbons/numpy,bertrand-l/numpy,ahaldane/numpy,mhvk/numpy,immerrr/numpy,stuarteberg/numpy,musically-ut/numpy,Yusa95/numpy,stuarteberg/numpy,solarjoe/numpy,mindw/numpy,solarjoe/numpy,chiffa/numpy,bmorris3/numpy,ChristopherHogan/numpy,charris/numpy,mindw/numpy,ogrisel/numpy,larsmans/numpy,sinhrks/numpy,ogrisel/numpy,argriffing/numpy,sonnyhu/numpy,KaelChen/numpy,jorisvandenbossche/numpy,bmorris3/numpy,joferkington/numpy,b-carter/numpy,skwbc/numpy,grlee77/numpy,naritta/numpy,sigma-random/numpy,pyparallel/numpy,gfyoung/numpy,endolith/numpy,rajathkumarmp/numpy,moreati/numpy,Dapid/numpy,pbrod/numpy,bertrand-l/numpy,mingwpy/numpy,jakirkham/numpy,numpy/numpy-refactor,ChanderG/numpy,mhvk/numpy,mwiebe/numpy,charris/numpy,Anwesh43/numpy,pizzathief/numpy,ViralLeadership/numpy,MichaelAquilina/numpy,has2k1/numpy,astrofrog/numpy,SunghanKim/nump
y,cjermain/numpy,bertrand-l/numpy,mhvk/numpy,nbeaver/numpy,tynn/numpy,cowlicks/numpy,tacaswell/numpy,stefanv/numpy,sigma-random/numpy,BMJHayward/numpy,CMartelLML/numpy,bmorris3/numpy,BMJHayward/numpy,jschueller/numpy,mhvk/numpy,ekalosak/numpy,jankoslavic/numpy,joferkington/numpy,trankmichael/numpy,SiccarPoint/numpy,rhythmsosad/numpy,pyparallel/numpy,ekalosak/numpy,dwillmer/numpy,MaPePeR/numpy,jschueller/numpy,Srisai85/numpy,ddasilva/numpy,skymanaditya1/numpy,ContinuumIO/numpy,brandon-rhodes/numpy,nguyentu1602/numpy,chatcannon/numpy,ahaldane/numpy,rmcgibbo/numpy,rgommers/numpy,githubmlai/numpy,endolith/numpy,skwbc/numpy,musically-ut/numpy,kiwifb/numpy,mingwpy/numpy,MichaelAquilina/numpy,dwf/numpy,jonathanunderwood/numpy,ESSS/numpy,numpy/numpy-refactor,numpy/numpy,GaZ3ll3/numpy,GrimDerp/numpy,dato-code/numpy,njase/numpy,astrofrog/numpy,larsmans/numpy,pelson/numpy,ewmoore/numpy,hainm/numpy,SunghanKim/numpy,anntzer/numpy,joferkington/numpy,ogrisel/numpy,dwillmer/numpy,trankmichael/numpy,musically-ut/numpy,jschueller/numpy,shoyer/numpy,maniteja123/numpy,charris/numpy,BMJHayward/numpy,tdsmith/numpy,GaZ3ll3/numpy,pizzathief/numpy,charris/numpy,pdebuyl/numpy,kiwifb/numpy,felipebetancur/numpy,skymanaditya1/numpy,tdsmith/numpy,yiakwy/numpy,jakirkham/numpy,grlee77/numpy,behzadnouri/numpy,pelson/numpy,WarrenWeckesser/numpy,matthew-brett/numpy,ssanderson/numpy,pbrod/numpy,bringingheavendown/numpy,rudimeier/numpy,gfyoung/numpy,bringingheavendown/numpy,embray/numpy,Linkid/numpy,ContinuumIO/numpy,BabeNovelty/numpy,pdebuyl/numpy,MSeifert04/numpy,rudimeier/numpy,jonathanunderwood/numpy,mortada/numpy,trankmichael/numpy,anntzer/numpy,MaPePeR/numpy,SunghanKim/numpy,cowlicks/numpy,pelson/numpy,Linkid/numpy,ewmoore/numpy,tacaswell/numpy,utke1/numpy,matthew-brett/numpy,ajdawson/numpy,sinhrks/numpy,behzadnouri/numpy,KaelChen/numpy,hainm/numpy,leifdenby/numpy,SiccarPoint/numpy,drasmuss/numpy,moreati/numpy,hainm/numpy,embray/numpy,immerrr/numpy,andsor/numpy,andsor/numpy,embray/numpy,has2k1/nu
mpy,jakirkham/numpy,CMartelLML/numpy,rajathkumarmp/numpy,ChristopherHogan/numpy,githubmlai/numpy,ajdawson/numpy,madphysicist/numpy,pbrod/numpy,dato-code/numpy,naritta/numpy,brandon-rhodes/numpy,pdebuyl/numpy,abalkin/numpy,simongibbons/numpy,jorisvandenbossche/numpy,MSeifert04/numpy,Yusa95/numpy,BabeNovelty/numpy,jonathanunderwood/numpy,numpy/numpy-refactor,pdebuyl/numpy,endolith/numpy,utke1/numpy,empeeu/numpy,dch312/numpy,gmcastil/numpy,bringingheavendown/numpy,argriffing/numpy,ogrisel/numpy,yiakwy/numpy,GaZ3ll3/numpy,ahaldane/numpy,matthew-brett/numpy,mingwpy/numpy,andsor/numpy,jankoslavic/numpy,Eric89GXL/numpy,empeeu/numpy,joferkington/numpy,sigma-random/numpy,solarjoe/numpy,MaPePeR/numpy,Eric89GXL/numpy,shoyer/numpy,rgommers/numpy,pizzathief/numpy,GaZ3ll3/numpy,mathdd/numpy,chatcannon/numpy,githubmlai/numpy,utke1/numpy,anntzer/numpy,brandon-rhodes/numpy,jorisvandenbossche/numpy,madphysicist/numpy,SiccarPoint/numpy,yiakwy/numpy,NextThought/pypy-numpy,drasmuss/numpy,dwf/numpy,bmorris3/numpy,grlee77/numpy,ChristopherHogan/numpy,NextThought/pypy-numpy,numpy/numpy-refactor,ChanderG/numpy,numpy/numpy,jschueller/numpy,madphysicist/numpy,mathdd/numpy,ahaldane/numpy,Linkid/numpy,rudimeier/numpy,MichaelAquilina/numpy,leifdenby/numpy,trankmichael/numpy,mindw/numpy,nbeaver/numpy,argriffing/numpy,mathdd/numpy,rherault-insa/numpy,ESSS/numpy,stefanv/numpy,madphysicist/numpy,grlee77/numpy,simongibbons/numpy,pelson/numpy,dato-code/numpy,njase/numpy,sonnyhu/numpy,mattip/numpy,BMJHayward/numpy,SiccarPoint/numpy,pelson/numpy,cjermain/numpy,sinhrks/numpy,rajathkumarmp/numpy,ChanderG/numpy,dwf/numpy,mingwpy/numpy,GrimDerp/numpy,tacaswell/numpy,ewmoore/numpy | numpy/version.py | numpy/version.py | version='1.0b3'
release = False
if not release:
    # Development build: append '.dev' plus the SVN revision when the
    # build step generated core/__svn_version__.py.
    import os
    svn_version_file = os.path.join(os.path.dirname(__file__),
                                    'core', '__svn_version__.py')
    if os.path.isfile(svn_version_file):
        import imp
        # imp.load_module does not close the file it is given; open it
        # explicitly so the handle can be released (the original leaked
        # a bare open()).
        fp = open(svn_version_file)
        try:
            svn = imp.load_module('numpy.core.__svn_version__',
                                  fp, svn_version_file, ('.py', 'U', 1))
        finally:
            fp.close()
        version += '.dev' + svn.version
| version='1.0b2'
release=False
if not release:
import os
svn_version_file = os.path.join(os.path.dirname(__file__),
'core','__svn_version__.py')
if os.path.isfile(svn_version_file):
import imp
svn = imp.load_module('numpy.core.__svn_version__',
open(svn_version_file),
svn_version_file,
('.py','U',1))
version += '.dev'+svn.version
| bsd-3-clause | Python |
2bbc289ce21365e18b04cb865328c494b75075fd | Update head revision to 0.9.9 | Ademan/NumPy-GSoC,jasonmccampbell/numpy-refactor-sprint,illume/numpy3k,Ademan/NumPy-GSoC,chadnetzer/numpy-gaurdro,jasonmccampbell/numpy-refactor-sprint,Ademan/NumPy-GSoC,efiring/numpy-work,jasonmccampbell/numpy-refactor-sprint,teoliphant/numpy-refactor,chadnetzer/numpy-gaurdro,teoliphant/numpy-refactor,chadnetzer/numpy-gaurdro,illume/numpy3k,illume/numpy3k,efiring/numpy-work,chadnetzer/numpy-gaurdro,efiring/numpy-work,teoliphant/numpy-refactor,illume/numpy3k,teoliphant/numpy-refactor,jasonmccampbell/numpy-refactor-sprint,efiring/numpy-work,Ademan/NumPy-GSoC,teoliphant/numpy-refactor | numpy/version.py | numpy/version.py | version='0.9.9'
import os
svn_version_file = os.path.join(os.path.dirname(__file__),
'core','__svn_version__.py')
if os.path.isfile(svn_version_file):
import imp
svn = imp.load_module('numpy.core.__svn_version__',
open(svn_version_file),
svn_version_file,
('.py','U',1))
version += '.'+svn.version
| version='0.9.7'
import os
svn_version_file = os.path.join(os.path.dirname(__file__),
'core','__svn_version__.py')
if os.path.isfile(svn_version_file):
import imp
svn = imp.load_module('numpy.core.__svn_version__',
open(svn_version_file),
svn_version_file,
('.py','U',1))
version += '.'+svn.version
| bsd-3-clause | Python |
d4cf4de3257719cf703c580a3d8d6f50629d2a22 | remove 1011 from valid codes (for now .. until INTERNAL_SERVER_ERROR is official) | luhn/AutobahnPython,magnux/AutobahnPython,inirudebwoy/AutobahnPython,leedm777/AutobahnPython,meejah/AutobahnPython,iffy/AutobahnPython,wrapp/AutobahnPython,flyser/AutobahnPython,D3f0/AutobahnPython,rapyuta/autobahn_rce,rapyuta/autobahn_rce,nucular/AutobahnPython,leedm777/AutobahnPython,Jenselme/AutobahnPython,crossbario/autobahn-python,D3f0/AutobahnPython,D3f0/AutobahnPython,dash-dash/AutobahnPython,boonedox/AutobahnPython,boonedox/AutobahnPython,rapyuta/autobahn_rce,schoonc/AutobahnPython,claws/AutobahnPython,cachedout/AutobahnPython,jvdm/AutobahnPython,crossbario/autobahn-python,tavendo/AutobahnPython,Geoion/AutobahnPython,wrapp/AutobahnPython,mcfletch/AutobahnPython,cachedout/AutobahnPython,flyser/AutobahnPython,bencharb/AutobahnPython,oberstet/autobahn-python,flyser/AutobahnPython,leedm777/AutobahnPython,crossbario/autobahn-python,oberstet/autobahn-python,markope/AutobahnPython,tomwire/AutobahnPython,wrapp/AutobahnPython,magnux/AutobahnPython,RyanHope/AutobahnPython,ewollesen/AutobahnPython,mcfletch/AutobahnPython,oberstet/autobahn-python,hzruandd/AutobahnPython,boonedox/AutobahnPython,iffy/AutobahnPython,rapyuta/autobahn_rce,ttimon7/AutobahnPython,dash-dash/AutobahnPython,iffy/AutobahnPython,iffy/AutobahnPython,claws/AutobahnPython,dash-dash/AutobahnPython,ewollesen/AutobahnPython,dash-dash/AutobahnPython | lib/python/autobahn/case/case7_7_X.py | lib/python/autobahn/case/case7_7_X.py | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
## list of some valid close codes
tests = [1000,1001,1002,1003,1007,1008,1009,1010,3000,3999,4000,4999]
Case7_7_X = []
def __init__(self, protocol):
Case.__init__(self, protocol)
def onOpen(self):
self.expected[Case.OK] = []
self.expectedClose = {"failedByMe":True,"closeCode":[self.p.CLOSE_STATUS_CODE_NORMAL,self.CLOSE_CODE],"requireClean":True}
self.p.sendCloseFrame(self.CLOSE_CODE, reasonUtf8 = "")
self.p.killAfter(1)
i = 1
for s in tests:
DESCRIPTION = """Send close with close code %d""" % s
EXPECTATION = """Clean close with normal or echoed code"""
C = type("Case7_7_%d" % i,
(object, Case, ),
{"CLOSE_CODE": s,
"DESCRIPTION": """%s""" % DESCRIPTION,
"EXPECTATION": """%s""" % EXPECTATION,
"__init__": __init__,
"onOpen": onOpen,
})
Case7_7_X.append(C)
i += 1
| ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
## list of (payload length, message count, case timeout)
tests = [1000,1001,1002,1003,1007,1008,1009,1010,1011,3000,4999]
Case7_7_X = []
def __init__(self, protocol):
Case.__init__(self, protocol)
def onOpen(self):
self.expected[Case.OK] = []
self.expectedClose = {"failedByMe":True,"closeCode":[self.p.CLOSE_STATUS_CODE_NORMAL,self.CLOSE_CODE],"requireClean":True}
self.p.sendCloseFrame(self.CLOSE_CODE, reasonUtf8 = "")
self.p.killAfter(1)
i = 1
for s in tests:
DESCRIPTION = """Send close with close code %d""" % s
EXPECTATION = """Clean close with normal or echoed code"""
C = type("Case7_7_%d" % i,
(object, Case, ),
{"CLOSE_CODE": s,
"DESCRIPTION": """%s""" % DESCRIPTION,
"EXPECTATION": """%s""" % EXPECTATION,
"__init__": __init__,
"onOpen": onOpen,
})
Case7_7_X.append(C)
i += 1
| mit | Python |
547691a1fe7d0618047c311341acd35c526827ac | Update to version v0.12.1 | pignacio/python-nvd3,vdloo/python-nvd3,liang42hao/python-nvd3,vdloo/python-nvd3,yelster/python-nvd3,Coxious/python-nvd3,BibMartin/python-nvd3,yelster/python-nvd3,BibMartin/python-nvd3,liang42hao/python-nvd3,pignacio/python-nvd3,mgx2/python-nvd3,mgx2/python-nvd3,oz123/python-nvd3,oz123/python-nvd3,mgx2/python-nvd3,pignacio/python-nvd3,liang42hao/python-nvd3,BibMartin/python-nvd3,vdloo/python-nvd3,yelster/python-nvd3,Coxious/python-nvd3,oz123/python-nvd3,Coxious/python-nvd3 | nvd3/__init__.py | nvd3/__init__.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
__version__ = '0.12.1' # edit also docs/source/conf.py
__all__ = ['lineChart', 'pieChart', 'lineWithFocusChart',
'stackedAreaChart', 'multiBarHorizontalChart',
'linePlusBarChart', 'cumulativeLineChart',
'scatterChart', 'discreteBarChart', 'multiBarChart',
'linePlusBarWithFocusChart']
from .lineChart import lineChart
from .pieChart import pieChart
from .lineWithFocusChart import lineWithFocusChart
from .stackedAreaChart import stackedAreaChart
from .multiBarHorizontalChart import multiBarHorizontalChart
from .linePlusBarChart import linePlusBarChart
from .cumulativeLineChart import cumulativeLineChart
from .scatterChart import scatterChart
from .discreteBarChart import discreteBarChart
from .multiBarChart import multiBarChart
from .linePlusBarWithFocusChart import linePlusBarWithFocusChart
from . import ipynb
| #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
__version__ = '0.12.0' # edit also docs/source/conf.py
__all__ = ['lineChart', 'pieChart', 'lineWithFocusChart',
'stackedAreaChart', 'multiBarHorizontalChart',
'linePlusBarChart', 'cumulativeLineChart',
'scatterChart', 'discreteBarChart', 'multiBarChart',
'linePlusBarWithFocusChart']
from .lineChart import lineChart
from .pieChart import pieChart
from .lineWithFocusChart import lineWithFocusChart
from .stackedAreaChart import stackedAreaChart
from .multiBarHorizontalChart import multiBarHorizontalChart
from .linePlusBarChart import linePlusBarChart
from .cumulativeLineChart import cumulativeLineChart
from .scatterChart import scatterChart
from .discreteBarChart import discreteBarChart
from .multiBarChart import multiBarChart
from .linePlusBarWithFocusChart import linePlusBarWithFocusChart
from . import ipynb
| mit | Python |
7bf68cbafe16a95342e2c74c28f6b2b0daad65a8 | Update 0.2.8 | keras-team/keras-cv,keras-team/keras-cv,keras-team/keras-cv | keras_cv/__init__.py | keras_cv/__init__.py | # Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv import layers
from keras_cv import metrics
from keras_cv import models
from keras_cv import utils
from keras_cv import version_check
from keras_cv.core import ConstantFactorSampler
from keras_cv.core import FactorSampler
from keras_cv.core import NormalFactorSampler
from keras_cv.core import UniformFactorSampler
version_check.check_tf_version()
__version__ = "0.2.8"
| # Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv import layers
from keras_cv import metrics
from keras_cv import models
from keras_cv import utils
from keras_cv import version_check
from keras_cv.core import ConstantFactorSampler
from keras_cv.core import FactorSampler
from keras_cv.core import NormalFactorSampler
from keras_cv.core import UniformFactorSampler
version_check.check_tf_version()
__version__ = "0.2.8dev"
| apache-2.0 | Python |
e15eeb05f93dd4745d4a14422c5e15ffc3cc3119 | remove demo url | openstax/openstax-cms,Connexions/openstax-cms,openstax/openstax-cms,openstax/openstax-cms,Connexions/openstax-cms,openstax/openstax-cms | openstax/urls.py | openstax/urls.py | from django.conf.urls import include, url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailcore import urls as wagtail_urls
from wagtail.contrib.wagtailapi import urls as wagtailapi_urls
urlpatterns = [
url(r'^django-admin/', include(admin.site.urls)),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^api/', include(wagtailapi_urls)),
# For anything not caught by a more specific rule above, hand over to
# Wagtail's serving mechanism
url(r'', include(wagtail_urls)),
]
if settings.DEBUG:
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic.base import RedirectView
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += [
url(r'^favicon\.ico$', RedirectView.as_view(url=settings.STATIC_URL + 'pages/images/favicon.ico'))
]
| from django.conf.urls import include, url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailcore import urls as wagtail_urls
from wagtail.contrib.wagtailapi import urls as wagtailapi_urls
from demo import views
urlpatterns = [
url(r'^django-admin/', include(admin.site.urls)),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'search/$', views.search, name='search'),
url(r'^api/', include(wagtailapi_urls)),
# For anything not caught by a more specific rule above, hand over to
# Wagtail's serving mechanism
url(r'', include(wagtail_urls)),
]
if settings.DEBUG:
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic.base import RedirectView
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += [
url(r'^favicon\.ico$', RedirectView.as_view(url=settings.STATIC_URL + 'demo/images/favicon.ico'))
]
| agpl-3.0 | Python |
7a49e7c4344f7d78a84644ade5ca1c3251065f4a | Use `glob.glob` instead of `os.walk` | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/grains/ssds.py | salt/grains/ssds.py | # -*- coding: utf-8 -*-
'''
Detect SSDs
'''
# Import python libs
import glob
import salt.utils
import logging
log = logging.getLogger(__name__)
def ssds():
'''
Return list of disk devices that are SSD (non-rotational)
'''
ssd_devices = []
for entry in glob.glob('/sys/block/*/queue/rotational'):
with salt.utils.fopen(entry) as entry_fp:
device = entry.split('/')[3]
flag = entry_fp.read(1)
if flag == '0':
ssd_devices.append(device)
log.debug('Device {0} reports itself as an SSD'.format(device))
elif flag == '1':
log.debug('Device {0} does not report itself as an SSD'.format(device))
else:
log.debug('Unable to identify device {0} as an SSD or not. It does not report 0 or 1'.format(device))
return {'SSDs': ssd_devices}
| # -*- coding: utf-8 -*-
'''
Detect SSDs
'''
import os
import salt.utils
import logging
log = logging.getLogger(__name__)
def ssds():
'''
Return list of disk devices that are SSD (non-rotational)
'''
SSDs = []
for subdir, dirs, files in os.walk('/sys/block'):
for dir in dirs:
flagfile = subdir + '/' + dir + '/queue/rotational'
if os.path.isfile(flagfile):
with salt.utils.fopen(flagfile, 'r') as _fp:
flag = _fp.read(1)
if flag == '0':
SSDs.append(dir)
log.info(dir + ' is a SSD')
elif flag == '1':
log.info(dir + ' is no SSD')
else:
log.warning(flagfile + ' does not report 0 or 1')
log.debug(flagfile + ' reports ' + flag)
else:
log.warning(flagfile + ' does not exist for ' + dir)
return {'SSDs': SSDs}
| apache-2.0 | Python |
f11a21ae5dd25310ac8165759ceb532de1365f64 | update default config options | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | saltcloud/config.py | saltcloud/config.py | '''
Manage configuration files in salt-cloud
'''
# Import python libs
import os
# Import salt libs
import salt.config
def cloud_config(path):
'''
Read in the salt cloud config and return the dict
'''
opts = {# Provider defaults
'provider': '',
'location': '',
# User/Passwords/keys
'RACKSPACE.key': '',
'RACKSPACE.user': '',
'LINODE.apikey': '',
'EC2.id': '',
'EC2.key': '',
'EC2.keyname': '',
'EC2.securitygroup': '',
# Global defaults
'ssh_auth': '',
'keysize': 4096,
'os': '',
}
salt.config.load_config(opts, path, 'SALT_CLOUD_CONFIG')
if 'include' in opts:
opts = salt.config.include_config(opts, path)
return opts
def vm_config(path):
'''
Read in the salt cloud vm config file
'''
# No defaults
opts = {}
salt.config.load_config(opts, path, 'SALT_CLOUDVM_CONFIG')
if 'include' in opts:
opts = salt.config.include_config(opts, path)
vms = []
if 'conf_file' in opts:
opts.pop('conf_file')
for key, val in opts.items():
val['name'] = key
vms.append(val)
return vms
| '''
Manage configuration files in salt-cloud
'''
# Import python libs
import os
# Import salt libs
import salt.config
def cloud_config(path):
'''
Read in the salt cloud config and return the dict
'''
opts = {# Provider defaults
'provider': '',
'location': '',
# User/Passwords/keys
'RACKSPACE_key': '',
'RACKSPACE_user': '',
'LINODE_apikey': '',
'EC2_key': '',
'EC2_user': '',
# Global defaults
'ssh_auth': '',
'keysize': 4096,
'os': '',
}
salt.config.load_config(opts, path, 'SALT_CLOUD_CONFIG')
if 'include' in opts:
opts = include_config(opts, path)
return opts
def vm_config(path):
'''
Read in the salt cloud vm config file
'''
# No defaults
opts = {}
salt.config.load_config(opts, path, 'SALT_CLOUDVM_CONFIG')
if 'include' in opts:
opts = include_config(opts, path)
vms = []
if 'conf_file' in opts:
opts.pop('conf_file')
for key, val in opts.items():
val['name'] = key
vms.append(val)
return vms
| apache-2.0 | Python |
cdd57a8706ca615af85d298ba3a99c80f1e9dba0 | Update mono.py to get 2.10.5 | bl8/bockbuild,BansheeMediaPlayer/bockbuild,mono/bockbuild,BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild,mono/bockbuild,bl8/bockbuild,bl8/bockbuild | packages/mono.py | packages/mono.py | class MonoPackage (Package):
def __init__ (self):
Package.__init__ (self, 'mono', '2.10.5',
sources = [
'http://download.mono-project.com/sources/%{name}/%{name}-%{version}.tar.bz2',
'patches/mono-runtime-relocation.patch'
],
configure_flags = [
'--with-jit=yes',
'--with-ikvm=no',
'--with-mcs-docs=no',
'--with-moonlight=no',
'--enable-quiet-build'
]
)
# Mono (in libgc) likes to fail to build randomly
self.make = 'for i in 1 2 3 4 5 6 7 8 9 10; do make && break; done'
# def prep (self):
# Package.prep (self)
# self.sh ('patch -p1 < "%{sources[1]}"')
def install (self):
Package.install (self)
if Package.profile.name == 'darwin':
self.sh ('sed -ie "s/libcairo.so.2/libcairo.2.dylib/" "%{prefix}/etc/mono/config"')
MonoPackage ()
| class MonoPackage (Package):
def __init__ (self):
Package.__init__ (self, 'mono', '2.10.4',
sources = [
'http://download.mono-project.com/sources/%{name}/%{name}-%{version}.tar.bz2',
'patches/mono-runtime-relocation.patch'
],
configure_flags = [
'--with-jit=yes',
'--with-ikvm=no',
'--with-mcs-docs=no',
'--with-moonlight=no',
'--enable-quiet-build'
]
)
# Mono (in libgc) likes to fail to build randomly
self.make = 'for i in 1 2 3 4 5 6 7 8 9 10; do make && break; done'
# def prep (self):
# Package.prep (self)
# self.sh ('patch -p1 < "%{sources[1]}"')
def install (self):
Package.install (self)
if Package.profile.name == 'darwin':
self.sh ('sed -ie "s/libcairo.so.2/libcairo.2.dylib/" "%{prefix}/etc/mono/config"')
MonoPackage ()
| mit | Python |
89d8ee0b91c9fd579dcf965e9e07f18954625c72 | Add support for manual journals | wegotpop/pyxero,jarekwg/pyxero,jaymcconnell/pyxero,opendesk/pyxero,thisismyrobot/pyxero,freakboy3742/pyxero,MJMortimer/pyxero,unomena/pyxero,schinckel/pyxero,unomena/pyxeropos,jacobg/pyxero,direvus/pyxero | xero/api.py | xero/api.py | from .manager import Manager
class Xero(object):
"""An ORM-like interface to the Xero API"""
OBJECT_LIST = (u'Contacts', u'Accounts', u'CreditNotes',
u'Currencies', u'Invoices', u'Items', u'Organisation',
u'Payments', u'TaxRates', u'TrackingCategories', u'ManualJournals')
def __init__(self, credentials):
# Iterate through the list of objects we support, for
# each of them create an attribute on our self that is
# the lowercase name of the object and attach it to an
# instance of a Manager object to operate on it
for name in self.OBJECT_LIST:
setattr(self, name.lower(), Manager(name, credentials.oauth))
| from .manager import Manager
class Xero(object):
"""An ORM-like interface to the Xero API"""
OBJECT_LIST = (u'Contacts', u'Accounts', u'CreditNotes',
u'Currencies', u'Invoices', u'Items', u'Organisation',
u'Payments', u'TaxRates', u'TrackingCategories')
def __init__(self, credentials):
# Iterate through the list of objects we support, for
# each of them create an attribute on our self that is
# the lowercase name of the object and attach it to an
# instance of a Manager object to operate on it
for name in self.OBJECT_LIST:
setattr(self, name.lower(), Manager(name, credentials.oauth))
| bsd-3-clause | Python |
98c011af0ee3413f5c6d1cfdbfdd8a9d8ed59b86 | Revise to sum_deq & revise comment | bowen0701/algorithms_data_structures | lc0067_add_binary.py | lc0067_add_binary.py | """Leetcode 67. Add Binary
Easy
URL: https://leetcode.com/problems/add-binary/
Given two binary strings, return their sum (also a binary string).
The input strings are both non-empty and contains only characters 1 or 0.
Example 1:
Input: a = "11", b = "1"
Output: "100"
Example 2:
Input: a = "1010", b = "1011"
Output: "10101"
"""
class SolutionIter(object):
def _padding(self, a, b):
if len(a) < len(b):
a = '0' * (len(b) - len(a)) + a
elif len(a) > len(b):
b = '0' * (len(a) - len(b)) + b
return a, b
def addBinary(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
Time complexity: O(n), where n is the length of the longer string.
Space complexity: O(1).
"""
from collections import deque
# Normalize a and b to equal length by padding 0's to shorer one.
a, b = self._padding(a, b)
# Add binary from backward if not out of boundary or exists carry.
sum_deq = deque([])
carry = 0
i = len(a) - 1
while i >= 0 or carry > 0:
if i >= 0:
val = int(a[i]) + int(b[i]) + carry
else:
val = carry
carry, val = val // 2, val % 2
sum_deq.appendleft(str(val))
i -= 1
return ''.join(list(sum_deq))
def main():
# Output: "100"
a = "11"
b = "1"
print SolutionIter().addBinary(a, b)
# Output: "10101"
a = "1010"
b = "1011"
print SolutionIter().addBinary(a, b)
if __name__ == '__main__':
main()
| """Leetcode 67. Add Binary
Easy
URL: https://leetcode.com/problems/add-binary/
Given two binary strings, return their sum (also a binary string).
The input strings are both non-empty and contains only characters 1 or 0.
Example 1:
Input: a = "11", b = "1"
Output: "100"
Example 2:
Input: a = "1010", b = "1011"
Output: "10101"
"""
class SolutionIter(object):
def _padding(self, a, b):
if len(a) < len(b):
a = '0' * (len(b) - len(a)) + a
elif len(a) > len(b):
b = '0' * (len(a) - len(b)) + b
return a, b
def addBinary(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
Time complexity: O(n), where n is the length of the longer string.
Space complexity: O(1).
"""
from collections import deque
# Normalize a and b to equal size by padding 0's to shorer one.
a, b = self._padding(a, b)
# Add binary from backward if not out of boundary or exists carry.
sum_arr = deque([])
carry = 0
i = len(a) - 1
while i >= 0 or carry > 0:
if i >= 0:
val = int(a[i]) + int(b[i]) + carry
else:
val = carry
carry, val = val // 2, val % 2
sum_arr.appendleft(str(val))
i -= 1
return ''.join(list(sum_arr))
def main():
# Output: "100"
a = "11"
b = "1"
print SolutionIter().addBinary(a, b)
# Output: "10101"
a = "1010"
b = "1011"
print SolutionIter().addBinary(a, b)
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
3ac16f20bbb4eef6607111ac5777f7db21095412 | Add functiion for emotion recognizing | sevazhidkov/lksh-analyse | parse_poldnev.py | parse_poldnev.py | """
Script that parsing poldnev.ru, checking students factors
using Emotion API and creating 'visits.csv'.
@author: Seva Zhidkov
@license: MIT
"""
import os
import time
import csv
import json
import re
import requests
import lxml.html
POLDNEV_BASE_URL = 'http://poldnev.ru/lksh/id{}'
STUDENT_IMAGE_REGEX = re.compile(r'href=\\"https?://img-fotki.yandex.ru/get/.*_XXL')
OCP_API_KEY = os.environ['OCP_API_KEY']
EMOTION_API_URL = 'https://api.projectoxford.ai/emotion/v1.0/recognize'
def detect_emotions(photo_url):
# Emotion API limits
time.sleep(2)
response = requests.post(EMOTION_API_URL, headers={
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': OCP_API_KEY
}, data=json.dumps({'url': photo_url}))
faces = json.loads(response.text)
# If there are few faces, sort it by square of face rectangle
faces.sort(
key=lambda x: x['faceRectangle']['width'] * x['faceRectangle']['height'],
reverse=True
)
return list(faces[0]['scores'].values())
print('Starting analysing data from poldnev.ru')
visits_file = open('visits.csv', 'w')
visits_writer = csv.writer(visits_file)
# First student id
student_id = 1
while True:
student_page = requests.get(POLDNEV_BASE_URL.format(student_id)).text
student_html = lxml.html.document_fromstring(student_page)
student_name = str(student_html.find_class('header-center').pop().text_content())
if 'Ошибка' in student_name:
break
student_photos = []
for photo in re.findall(STUDENT_IMAGE_REGEX, student_page):
# Delete a beginning ('href="') of the string
student_photos.append(photo[7:])
# First row - names of columns
visits_rows = student_html.get_element_by_id('person-table').getchildren()[1:]
photo_num = 0
for row in visits_rows:
# row[1] - year and month, row[2] - position
visit_date = row[1].text_content()
visit_position = row[2].text_content()
# If there is no photo for this visit - continue to next student visit
if 'class' not in row.attrib:
continue
print(student_id, student_name, visit_date,
visit_position, detect_emotions(student_photos[photo_num]))
photo_num += 1
student_id += 1
visits_file.close()
| """
Script that parsing poldnev.ru, checking students factors
using Emotion API and creating 'visits.csv'.
@author: Seva Zhidkov
@license: MIT
"""
import csv
import json
import re
import requests
import lxml.html
def detect_emotions(photo_url):
pass
POLDNEV_BASE_URL = 'http://poldnev.ru/lksh/id{}'
STUDENT_IMAGE_REGEX = re.compile(r'href=\\"https?://img-fotki.yandex.ru/get/.*_XXL')
print('Starting analysing data from poldnev.ru')
visits_file = open('visits.csv', 'w')
visits_writer = csv.writer(visits_file)
# First student id
student_id = 1
while True:
student_page = requests.get(POLDNEV_BASE_URL.format(student_id)).text
student_html = lxml.html.document_fromstring(student_page)
student_name = str(student_html.find_class('header-center').pop().text_content())
if 'Ошибка' in student_name:
break
student_photos = []
for photo in re.findall(STUDENT_IMAGE_REGEX, student_page):
# Delete a beginning ('href="') of the string
student_photos.append(photo[7:])
# First row - names of columns
visits_rows = student_html.get_element_by_id('person-table').getchildren()[1:]
photo_num = 0
for row in visits_rows:
# row[1] - year and month, row[2] - position
visit_date = row[1].text_content()
visit_position = row[2].text_content()
# If there is no photo for this visit - continue to next student visit
if 'class' not in row.attrib:
continue
print(student_id, student_name, visit_date,
visit_position, student_photos[photo_num])
photo_num += 1
student_id += 1
visits_file.close()
| mit | Python |
1ab5d60d6e49226f7dfd17416993b9460e5b688f | Add basic view for index page with no context | jwarren116/RoadTrip,jwarren116/RoadTrip,jwarren116/RoadTrip | planner/views.py | planner/views.py | from django.shortcuts import render
def index(request):
return render(request, 'base.html')
| from django.shortcuts import render
# Create your views here.
| apache-2.0 | Python |
511c7c858d6cd99b66b0cd7fc187dc73034bb3fa | Check for existence of isatty on stdout before calling it | thaim/ansible,thaim/ansible | lib/ansible/color.py | lib/ansible/color.py | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
ANSIBLE_COLOR=True
if os.getenv("ANSIBLE_NOCOLOR") is not None:
ANSIBLE_COLOR=False
elif not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
ANSIBLE_COLOR=False
else:
try:
import curses
curses.setupterm()
if curses.tigetnum('colors') < 0:
ANSIBLE_COLOR=False
except ImportError:
# curses library was not found
pass
except curses.error:
# curses returns an error (e.g. could not find terminal)
ANSIBLE_COLOR=False
if os.getenv("ANSIBLE_FORCE_COLOR") is not None:
ANSIBLE_COLOR=True
# --- begin "pretty"
#
# pretty - A miniature library that provides a Python print and stdout
# wrapper that makes colored terminal text easier to use (eg. without
# having to mess around with ANSI escape sequences). This code is public
# domain - there is no license except that you must leave this header.
#
# Copyright (C) 2008 Brian Nez <thedude at bri1 dot com>
#
# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
codeCodes = {
'black': '0;30', 'bright gray': '0;37',
'blue': '0;34', 'white': '1;37',
'green': '0;32', 'bright blue': '1;34',
'cyan': '0;36', 'bright green': '1;32',
'red': '0;31', 'bright cyan': '1;36',
'purple': '0;35', 'bright red': '1;31',
'yellow': '0;33', 'bright purple': '1;35',
'dark gray': '1;30', 'bright yellow': '1;33',
'normal': '0'
}
def stringc(text, color):
"""String in color."""
if ANSIBLE_COLOR:
return "\033["+codeCodes[color]+"m"+text+"\033[0m"
else:
return text
# --- end "pretty"
| # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
ANSIBLE_COLOR=True
if os.getenv("ANSIBLE_NOCOLOR") is not None:
ANSIBLE_COLOR=False
elif not sys.stdout.isatty():
ANSIBLE_COLOR=False
else:
try:
import curses
curses.setupterm()
if curses.tigetnum('colors') < 0:
ANSIBLE_COLOR=False
except ImportError:
# curses library was not found
pass
except curses.error:
# curses returns an error (e.g. could not find terminal)
ANSIBLE_COLOR=False
if os.getenv("ANSIBLE_FORCE_COLOR") is not None:
ANSIBLE_COLOR=True
# --- begin "pretty"
#
# pretty - A miniature library that provides a Python print and stdout
# wrapper that makes colored terminal text easier to use (eg. without
# having to mess around with ANSI escape sequences). This code is public
# domain - there is no license except that you must leave this header.
#
# Copyright (C) 2008 Brian Nez <thedude at bri1 dot com>
#
# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
codeCodes = {
'black': '0;30', 'bright gray': '0;37',
'blue': '0;34', 'white': '1;37',
'green': '0;32', 'bright blue': '1;34',
'cyan': '0;36', 'bright green': '1;32',
'red': '0;31', 'bright cyan': '1;36',
'purple': '0;35', 'bright red': '1;31',
'yellow': '0;33', 'bright purple': '1;35',
'dark gray': '1;30', 'bright yellow': '1;33',
'normal': '0'
}
def stringc(text, color):
"""String in color."""
if ANSIBLE_COLOR:
return "\033["+codeCodes[color]+"m"+text+"\033[0m"
else:
return text
# --- end "pretty"
| mit | Python |
e6d92aa8d56d670109daecb9a8f0e22d2f88f50c | Update debug middleware for Python 3. | ghostwords/localore,ghostwords/localore,ghostwords/localore | localore/localore/middleware/debug.py | localore/localore/middleware/debug.py | import json
from django.http import HttpResponse
# from http://stackoverflow.com/a/19249559
class NonHtmlDebugToolbarMiddleware(object):
"""
The Django Debug Toolbar usually only works for views that return HTML.
This middleware wraps any non-HTML response in HTML if the request
has a 'debug' query parameter (e.g. http://localhost/foo?debug)
Special handling for json (pretty printing) and
binary data (only show data length)
"""
@staticmethod
def process_response(request, response):
if request.GET.get('debug') == '':
if response['Content-Type'] == 'application/octet-stream':
new_content = '<html><body>Binary Data, ' \
'Length: {}</body></html>'.format(len(response.content))
response = HttpResponse(new_content)
elif response['Content-Type'] != 'text/html':
content = response.content.decode('utf-8')
try:
json_ = json.loads(content)
content = json.dumps(json_, sort_keys=True, indent=2)
except ValueError:
pass
response = HttpResponse('<html><body><pre>{}'
'</pre></body></html>'.format(content))
return response
| import json
from django.http import HttpResponse
# from http://stackoverflow.com/a/19249559
class NonHtmlDebugToolbarMiddleware(object):
"""
The Django Debug Toolbar usually only works for views that return HTML.
This middleware wraps any non-HTML response in HTML if the request
has a 'debug' query parameter (e.g. http://localhost/foo?debug)
Special handling for json (pretty printing) and
binary data (only show data length)
"""
@staticmethod
def process_response(request, response):
if request.GET.get('debug') == '':
if response['Content-Type'] == 'application/octet-stream':
new_content = '<html><body>Binary Data, ' \
'Length: {}</body></html>'.format(len(response.content))
response = HttpResponse(new_content)
elif response['Content-Type'] != 'text/html':
content = response.content
try:
json_ = json.loads(content)
content = json.dumps(json_, sort_keys=True, indent=2)
except ValueError:
pass
response = HttpResponse('<html><body><pre>{}'
'</pre></body></html>'.format(content))
return response
| mpl-2.0 | Python |
3a7bd1eb2a91861ba044b46fada158034ccd6b23 | Fix Tests | meine-stadt-transparent/meine-stadt-transparent,meine-stadt-transparent/meine-stadt-transparent,meine-stadt-transparent/meine-stadt-transparent,meine-stadt-transparent/meine-stadt-transparent | mainapp/tests/test_document_access.py | mainapp/tests/test_document_access.py | from django.test import TestCase, override_settings
from mainapp.models import Paper, File
@override_settings(ELASTICSEARCH_DSL_AUTOSYNC=False, ELASTICSEARCH_DSL_AUTO_REFRESH=False)
class TestDocumentAccess(TestCase):
fixtures = ['initdata']
base_paper_len = 2
def test_delete_document(self):
paper = Paper.objects.get(pk=1)
file = File.objects.get(pk=2)
file_papers = file.paper_set.all()
self.assertEqual(self.base_paper_len, len(file_papers))
# Now we delete the paper
paper.deleted = True
paper.save()
file_papers = file.paper_set.all()
self.assertEqual(self.base_paper_len - 1, len(file_papers))
with self.assertRaises(Paper.DoesNotExist):
Paper.objects.get(pk=1)
# Now we restore it
deleted_paper = Paper.objects_with_deleted.get(pk=1)
deleted_paper.deleted = False
deleted_paper.save()
paper = Paper.objects.get(pk=1)
self.assertEqual(1, paper.id)
file_papers = file.paper_set.all()
self.assertEqual(self.base_paper_len, len(file_papers))
| from django.test import TestCase, override_settings
from mainapp.models import Paper, File
@override_settings(ELASTICSEARCH_DSL_AUTOSYNC=False, ELASTICSEARCH_DSL_AUTO_REFRESH=False)
class TestDocumentAccess(TestCase):
fixtures = ['initdata']
base_paper_len = 2
def test_delete_document(self):
paper = Paper.objects.get(pk=1)
file = File.objects.get(pk=2)
file_papers = file.paper_set.all()
self.assertEqual(self.base_paper_len, len(file_papers))
# Now we delete the paper
paper.deleted = True
paper.save()
file_papers = file.paper_set.all()
self.assertEqual(0, len(file_papers))
with self.assertRaises(Paper.DoesNotExist):
Paper.objects.get(pk=1)
# Now we restore it
deleted_paper = Paper.objects_with_deleted.get(pk=1)
deleted_paper.deleted = False
deleted_paper.save()
paper = Paper.objects.get(pk=1)
self.assertEqual(1, paper.id)
file_papers = file.paper_set.all()
self.assertEqual(1, len(file_papers))
| mit | Python |
802a8ec31cb37a38f976917674ed5604599a1848 | add long_description | rmulvey/mc,cortlandstarrett/mc,rmulvey/mc,leviathan747/mc,rmulvey/mc,leviathan747/mc,xtuml/mc,leviathan747/mc,cortlandstarrett/mc,xtuml/mc,lwriemen/mc,xtuml/mc,xtuml/mc,lwriemen/mc,rmulvey/mc,lwriemen/mc,lwriemen/mc,leviathan747/mc,xtuml/mc,rmulvey/mc,cortlandstarrett/mc,lwriemen/mc,cortlandstarrett/mc,leviathan747/mc,cortlandstarrett/mc,lwriemen/mc,xtuml/mc,rmulvey/mc,leviathan747/mc,cortlandstarrett/mc | pymc/setup.py | pymc/setup.py | #!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='pymc3020',
version='1.0.1',
description='Python executor for MC-3020',
long_description=long_description,
long_description_content_type='text/markdown',
author='Levi Starrett',
author_email='levi@roxsoftware.com',
url='https://github.com/xtuml/mc',
license='Apache 2.0',
download_url='https://github.com/xtuml/mc/releases/download/1.0.1/pymc3020-1.0.1.tar.gz',
keywords='xtuml bridgepoint',
packages=['mc3020'],
install_requires=['pyxtuml', 'pyrsl'],
include_package_data=True,
zip_safe=False)
| #!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='pymc3020',
version='1.0.1',
description='Python executor for MC-3020',
author='Levi Starrett',
author_email='levi@roxsoftware.com',
url='https://github.com/xtuml/mc',
license='Apache 2.0',
download_url='https://github.com/xtuml/mc/releases/download/1.0.1/pymc3020-1.0.1.tar.gz',
keywords='xtuml bridgepoint',
packages=['mc3020'],
install_requires=['pyxtuml', 'pyrsl'],
include_package_data=True,
zip_safe=False)
| apache-2.0 | Python |
38748da362f484c3f84dfa63c8ff052ec1661820 | Update main.py | clccmh/pomodoro | pomodoro/main.py | pomodoro/main.py | #!/usr/bin/env python
import click
import progressbar
import time
@click.command()
@click.option('--minutes', default=25, help='Number of minutes, default 25.')
def pomodoro(minutes):
bar = progressbar.ProgressBar(widgets=[
progressbar.Bar(),
])
for i in bar(range(minutes*60)):
time.sleep(1)
if __name__ == '__main__':
pomodoro()
| #!/usr/bin/env python
__version__ = '0.0.1'
import click
import progressbar
import time
@click.command()
@click.option('--minutes', default=25, help='Number of minutes, default 25.')
def pomodoro(minutes):
bar = progressbar.ProgressBar(widgets=[
progressbar.Bar(),
])
for i in bar(range(minutes*60)):
time.sleep(1)
if __name__ == '__main__':
pomodoro()
| mit | Python |
1e0763c97100f4efb1db4c72c200f759bfde9a56 | update scheduler cron | niqdev/packtpub-crawler,niqdev/packtpub-crawler,niqdev/packtpub-crawler | script/scheduler.py | script/scheduler.py | from apscheduler.schedulers.blocking import BlockingScheduler
import os
import shutil
sched = BlockingScheduler()
#@sched.scheduled_job('interval', minutes=5)
@sched.scheduled_job('cron', day_of_week='mon-sun', hour=10)
def scheduled_job():
print('New job: packtpub-crawler')
#print(os.listdir(os.curdir))
shutil.rmtree('./ebooks', ignore_errors=True)
os.system('python script/spider.py --config config/prod.cfg --upload drive --notify')
sched.start()
| from apscheduler.schedulers.blocking import BlockingScheduler
import os
import shutil
sched = BlockingScheduler()
#@sched.scheduled_job('interval', minutes=5)
@sched.scheduled_job('cron', hour=9)
def scheduled_job():
print('New job: packtpub-crawler')
#print(os.listdir(os.curdir))
shutil.rmtree('./ebooks', ignore_errors=True)
os.system('python script/spider.py --config config/prod.cfg --upload drive --notify')
sched.start()
| mit | Python |
95f1438d88865ca5619ca4759f621d778ed75483 | fix pgw_shop_id | logithr/django-htpayway,logithr/django-htpayway | project/utils.py | project/utils.py | from htpayway import PayWay
from mock import Mock
from decimal import Decimal
class ThisPayWay(PayWay):
pgw_shop_id = '20000185'
pgw_secret_key = "pZclhO{2G+RlMR#FWX{9g5'C"
def set_order(self, order):
# mock data
self.order = Mock(name='order')
self.order.id = '11'
self.order.first_name = 'Igor'
self.order.last_name = 'Pejic'
self.order.street = 'Bujska'
self.order.city = 'Rijeka'
self.order.post_code = '51000'
self.order.country = 'Hrvatska'
self.order.telephone = '0992347823'
self.order.email = 'dev-support@logit.hr'
self.order.amount = Decimal('230.30')
# url na koji se vraca nakon placanja
def set_request(self, request):
self.pgw_language = request.LANGUAGE_CODE
def after_success(self):
print 'succesfully overriden success'
def after_failure(self):
print 'succesfully overriden failure'
| from htpayway import PayWay
from mock import Mock
from decimal import Decimal
class ThisPayWay(PayWay):
pgw_shop_id = '20000186'
pgw_secret_key = "pZclhO{2G+RlMR#FWX{9g5'C"
def set_order(self, order):
# mock data
self.order = Mock(name='order')
self.order.id = '11'
self.order.first_name = 'Igor'
self.order.last_name = 'Pejic'
self.order.street = 'Bujska'
self.order.city = 'Rijeka'
self.order.post_code = '51000'
self.order.country = 'Hrvatska'
self.order.telephone = '0992347823'
self.order.email = 'dev-support@logit.hr'
self.order.amount = Decimal('230.30')
# url na koji se vraca nakon placanja
def set_request(self, request):
self.pgw_language = request.LANGUAGE_CODE
def after_success(self):
print 'succesfully overriden success'
def after_failure(self):
print 'succesfully overriden failure'
| mit | Python |
ca440802b4548ca24cb4c7a83b65c890c06254e4 | Debug flag | cf-platform-eng/aws-pcf-quickstart,cf-platform-eng/aws-pcf-quickstart,cf-platform-eng/aws-pcf-quickstart | quickstart.py | quickstart.py | import datetime
import os
import sys
import time
import click
PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(1, os.path.join(PATH, 'lib'))
from lib import settings, om_manager
# todo: cli flag...?
om_manager.debug_mode = True
@click.group()
@click.option('--debug/--no-debug', default=False)
@click.pass_context
def cli(ctx, debug):
my_settings = settings.Settings()
ctx.obj['settings'] = my_settings
@cli.command('configure-opsman-auth')
@click.pass_context
def config_opsman_auth_cmd(ctx):
sys.exit(time_cmd(om_manager.config_opsman_auth, ctx.obj['settings']))
def time_cmd(cmd, *args):
cmd_name = cmd.__name__
print("Starting {}".format(cmd_name))
start = time.time()
exit_code = cmd(*args)
end = time.time()
print("Duration for {}: {}".format(cmd_name, datetime.timedelta(seconds=end - start)))
if exit_code != 0:
print("{} failed".format(cmd_name))
return exit_code
if __name__ == "__main__":
cli(obj={})
| import datetime
import os
import sys
import time
import click
PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(1, os.path.join(PATH, 'lib'))
from lib import settings, om_manager
# todo: cli flag...?
om_manager.debug_mode = False
@click.group()
@click.pass_context
def cli(ctx):
my_settings = settings.Settings()
ctx.obj['settings'] = my_settings
@cli.command('configure-opsman-auth')
@click.pass_context
def config_opsman_auth_cmd(ctx):
sys.exit(time_cmd(om_manager.config_opsman_auth, ctx.obj['settings']))
def time_cmd(cmd, *args):
cmd_name = cmd.__name__
print("Starting {}".format(cmd_name))
start = time.time()
exit_code = cmd(*args)
end = time.time()
print("Duration for {}: {}".format(cmd_name, datetime.timedelta(seconds=end - start)))
if exit_code != 0:
print("{} failed".format(cmd_name))
return exit_code
if __name__ == "__main__":
cli(obj={})
| apache-2.0 | Python |
bcb28563b0b8e3cfbc574771ad8a9929ec58efd2 | Update XFileSharing.py | vuolter/pyload,vuolter/pyload,vuolter/pyload | module/plugins/hoster/XFileSharing.py | module/plugins/hoster/XFileSharing.py | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.XFSHoster import XFSHoster, create_getInfo
class XFileSharing(XFSHoster):
__name__ = "XFileSharing"
__type__ = "hoster"
__version__ = "0.62"
__status__ = "testing"
__pattern__ = r'^unmatchable$'
__config__ = [("activated", "bool", "Activated", True)]
__description__ = """XFileSharing dummy hoster plugin for hook"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
URL_REPLACEMENTS = [("/embed-", "/")]
def _log(self, level, plugintype, pluginname, messages):
messages = (self.PLUGIN_NAME,) + messages
return super(XFileSharing, self)._log(level, plugintype, pluginname, messages)
def init(self):
self.__pattern__ = self.pyload.pluginManager.hosterPlugins[self.classname]['pattern']
self.PLUGIN_DOMAIN = re.match(self.__pattern__, self.pyfile.url).group("DOMAIN").lower()
self.PLUGIN_NAME = "".join(part.capitalize() for part in re.split(r'\.|\d+|-', self.PLUGIN_DOMAIN) if part != '.')
def setup(self):
self.chunk_limit = -1 if self.premium else 1
self.multiDL = True
self.resume_download = self.premium
#@TODO: Recheck in 0.4.10
def setup_base(self):
if self.account:
self.req = self.pyload.requestFactory.getRequest(self.PLUGIN_NAME, self.account.user)
self.premium = self.account.info['data']['premium'] #@NOTE: Avoid one unnecessary get_info call by `self.account.premium` here
else:
self.req = self.pyload.requestFactory.getRequest(self.classname)
self.premium = False
super(XFileSharing, self).setup_base()
#@TODO: Recheck in 0.4.10
def load_account(self):
class_name = self.classname
self.__class__.__name__ = str(self.PLUGIN_NAME)
super(XFileSharing, self).load_account()
self.__class__.__name__ = class_name
getInfo = create_getInfo(XFileSharing)
| # -*- coding: utf-8 -*-
import re
from module.plugins.internal.XFSHoster import XFSHoster, create_getInfo
class XFileSharing(XFSHoster):
__name__ = "XFileSharing"
__type__ = "hoster"
__version__ = "0.61"
__status__ = "testing"
__pattern__ = r'^unmatchable$'
__config__ = [("activated", "bool", "Activated", True)]
__description__ = """XFileSharing dummy hoster plugin for hook"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
URL_REPLACEMENTS = [("/embed-", "/")]
def _log(self, level, plugintype, pluginname, messages):
messages = (self.PLUGIN_NAME,) + messages
return super(XFileSharing, self)._log(level, plugintype, pluginname, messages)
def init(self):
self.__pattern__ = self.pyload.pluginManager.hosterPlugins[self.classname]['pattern']
self.PLUGIN_DOMAIN = re.match(self.__pattern__, self.pyfile.url).group("DOMAIN").lower()
self.PLUGIN_NAME = "".join(part.capitalize() for part in re.split(r'\.|\d+|-', self.PLUGIN_DOMAIN) if part != '.')
def setup(self):
self.chunk_limit = -1 if self.premium else 1
self.multiDL = True
self.resume_download = self.premium
#@TODO: Recheck in 0.4.10
def setup_base(self):
if self.account:
self.req = self.pyload.requestFactory.getRequest(self.PLUGIN_NAME, self.account.user)
self.premium = self.account.info['data']['premium'] #@NOTE: Avoid one unnecessary get_info call by `self.account.premium` here
else:
self.req = self.pyload.requestFactory.getRequest(self.classname)
self.premium = False
super(SimpleCrypter, self).setup_base()
#@TODO: Recheck in 0.4.10
def load_account(self):
class_name = self.classname
self.__class__.__name__ = str(self.PLUGIN_NAME)
super(XFileSharing, self).load_account()
self.__class__.__name__ = class_name
getInfo = create_getInfo(XFileSharing)
| agpl-3.0 | Python |
995d14e1d0734654c83d7534595f89228f815ef3 | fix (Windows) issue #42 | llvmpy/llvmpy,llvmpy/llvmpy,llvmpy/llvmpy,llvmpy/llvmpy,llvmpy/llvmpy,llvmpy/llvmpy | llvm-config-win32.py | llvm-config-win32.py | import sys, os
def find_path_of(filename, envvar='PATH'):
"""Finds the path from $PATH where the file exists, returns None if not found."""
pathlist = os.getenv(envvar).split(os.pathsep)
for path in pathlist:
if os.path.exists(os.path.join(path, filename)):
return os.path.abspath(path)
return None
if sys.argv[1] == '--version':
cmd = 'llvm-tblgen --version'
# Hardcoded extraction, only tested on llvm 3.1
result = os.popen(cmd).read().split('\n')[1].strip().split(' ')[2]
print result
elif sys.argv[1] == '--libs':
# NOTE: instead of actually looking at the components requested,
# we just spit out a bunch of libs
for lib in """
LLVMAnalysis
LLVMAsmParser
LLVMAsmPrinter
LLVMBitReader
LLVMBitWriter
LLVMCodeGen
LLVMCore
LLVMExecutionEngine
LLVMInstCombine
LLVMInstrumentation
LLVMInterpreter
LLVMipa
LLVMipo
LLVMJIT
LLVMLinker
LLVMMC
LLVMMCParser
LLVMObject
LLVMRuntimeDyld
LLVMScalarOpts
LLVMSelectionDAG
LLVMSupport
LLVMTarget
LLVMTransformUtils
LLVMVectorize
LLVMX86AsmParser
LLVMX86AsmPrinter
LLVMX86CodeGen
LLVMX86Desc
LLVMX86Info
LLVMX86Utils
Advapi32
Shell32
""".split():
print('-l%s' % lib)
llvmbin = find_path_of('llvm-tblgen.exe')
if os.path.exists(os.path.join(llvmbin, '../lib/LLVMPTXCodeGen.lib')):
print('-lLLVMPTXAsmPrinter')
print('-lLLVMPTXCodeGen')
print('-lLLVMPTXDesc')
print('-lLLVMPTXInfo')
elif sys.argv[1] == '--includedir':
llvmbin = find_path_of('llvm-tblgen.exe')
if llvmbin is None:
raise RuntimeError('Could not find LLVM')
incdir = os.path.abspath(os.path.join(llvmbin, '../include'))
if not os.path.exists(os.path.join(incdir, 'llvm/BasicBlock.h')):
raise RuntimeError('Could not find LLVM include dir')
print incdir
elif sys.argv[1] == '--libdir':
llvmbin = find_path_of('llvm-tblgen.exe')
if llvmbin is None:
raise RuntimeError('Could not find LLVM')
libdir = os.path.abspath(os.path.join(llvmbin, '../lib'))
if not os.path.exists(os.path.join(libdir, 'LLVMCore.lib')):
raise RuntimeError('Could not find LLVM lib dir')
print libdir
else:
raise RuntimeError('Unrecognized llvm-config command %s' % sys.argv[1])
| import sys, os
def find_path_of(filename, envvar='PATH'):
"""Finds the path from $PATH where the file exists, returns None if not found."""
pathlist = os.getenv(envvar).split(os.pathsep)
for path in pathlist:
if os.path.exists(os.path.join(path, filename)):
return os.path.abspath(path)
return None
if sys.argv[1] == '--version':
cmd = 'llvm-tblgen --version'
# Hardcoded extraction, only tested on llvm 3.1
result = os.popen(cmd).read().split('\n')[1].strip().split(' ')[2]
print result
elif sys.argv[1] == '--libs':
# NOTE: instead of actually looking at the components requested,
# we just spit out a bunch of libs
for lib in """
LLVMAnalysis
LLVMAsmParser
LLVMAsmPrinter
LLVMBitReader
LLVMBitWriter
LLVMCodeGen
LLVMCore
LLVMExecutionEngine
LLVMInstCombine
LLVMInstrumentation
LLVMInterpreter
LLVMipa
LLVMipo
LLVMJIT
LLVMLinker
LLVMMC
LLVMMCParser
LLVMScalarOpts
LLVMSelectionDAG
LLVMSupport
LLVMTarget
LLVMTransformUtils
LLVMVectorize
LLVMX86AsmParser
LLVMX86AsmPrinter
LLVMX86CodeGen
LLVMX86Desc
LLVMX86Info
LLVMX86Utils
Advapi32
Shell32
""".split():
print('-l%s' % lib)
llvmbin = find_path_of('llvm-tblgen.exe')
if os.path.exists(os.path.join(llvmbin, '../lib/LLVMPTXCodeGen.lib')):
print('-lLLVMPTXAsmPrinter')
print('-lLLVMPTXCodeGen')
print('-lLLVMPTXDesc')
print('-lLLVMPTXInfo')
elif sys.argv[1] == '--includedir':
llvmbin = find_path_of('llvm-tblgen.exe')
if llvmbin is None:
raise RuntimeError('Could not find LLVM')
incdir = os.path.abspath(os.path.join(llvmbin, '../include'))
if not os.path.exists(os.path.join(incdir, 'llvm/BasicBlock.h')):
raise RuntimeError('Could not find LLVM include dir')
print incdir
elif sys.argv[1] == '--libdir':
llvmbin = find_path_of('llvm-tblgen.exe')
if llvmbin is None:
raise RuntimeError('Could not find LLVM')
libdir = os.path.abspath(os.path.join(llvmbin, '../lib'))
if not os.path.exists(os.path.join(libdir, 'LLVMCore.lib')):
raise RuntimeError('Could not find LLVM lib dir')
print libdir
else:
raise RuntimeError('Unrecognized llvm-config command %s' % sys.argv[1])
| bsd-3-clause | Python |
54bd5a1311c8e2a9c9097888c4b302558308054d | change CNN spider into a crawl spider | dmkoch/scrapy-intro,dmkoch/scrapy-intro | newsbot/newsbot/spiders/cnn_spider.py | newsbot/newsbot/spiders/cnn_spider.py | from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from ..items import ArticleItem
class CnnSpider(CrawlSpider):
name = 'cnn'
allowed_domains = ['www.cnn.com']
start_urls = [
'http://www.cnn.com/',
]
rules = [
Rule(SgmlLinkExtractor(allow=[r'/\d{4}/\d{2}/\d{2}/[^/]+/[^/]+/index.html']),
callback='parse_item'),
]
def parse_item(self, response):
hxs = HtmlXPathSelector(response)
article = ArticleItem()
article['url'] = response.url
article['source'] = self.name
article['headline'] = hxs.select(
'//*[@id="cnnContentContainer"]/h1[1]/text()').extract()
article['byline'] = hxs.select(
'//*[@id="cnnContentContainer"]//div[@class="cnnByline"]'
'//text()').extract()
article['article'] = hxs.select(
'//*[@id="cnnContentContainer"]//p//text()').extract()
return article
| from scrapy.spider import BaseSpider
class CnnSpider(BaseSpider):
name = 'cnn'
allowed_domains = ['www.cnn.com']
start_urls = [
'http://www.cnn.com/',
]
def parse(self, response):
open('homepage.html', 'wb').write(response.body)
| mit | Python |
a46783afc4c4c92e917c12b68972a56bdb8f2597 | make compatible with Python 3 | SpiderLabs/owasp-modsecurity-crs,SpiderLabs/owasp-modsecurity-crs,SpiderLabs/owasp-modsecurity-crs,SpiderLabs/owasp-modsecurity-crs,coreruleset/coreruleset,SpiderLabs/owasp-modsecurity-crs,umarfarook882/owasp-modsecurity-crs,coreruleset/coreruleset,SpiderLabs/owasp-modsecurity-crs,umarfarook882/owasp-modsecurity-crs,coreruleset/coreruleset,SpiderLabs/owasp-modsecurity-crs,coreruleset/coreruleset,umarfarook882/owasp-modsecurity-crs,coreruleset/coreruleset,umarfarook882/owasp-modsecurity-crs,umarfarook882/owasp-modsecurity-crs,coreruleset/coreruleset | util/regexp-assemble/regexp-cmdline.py | util/regexp-assemble/regexp-cmdline.py | #!/usr/bin/env python
#
# Convert a word list to a list of regexps usable by Regexp::Assemble.
#
# Examples:
# cat regexp-932100.txt | ./regexp-cmdline.py unix | ./regexp-assemble.pl
# cat regexp-932110.txt | ./regexp-cmdline.py windows | ./regexp-assemble.pl
# cat regexp-932150.txt | ./regexp-cmdline.py unix | ./regexp-assemble.pl
#
# Refer to rule 932100, 932110, 932150 for documentation.
#
import fileinput, string, sys
# Convert a single line to regexp format, and insert anti-cmdline
# evasions between characters.
def regexp_str(str, evasion):
# By convention, if the line starts with ' char, copy the rest
# verbatim.
if str[0] == "'":
return str[1:]
result = ''
for i, char in enumerate(str):
if i > 0:
result += evasion
result += regexp_char(char, evasion)
return result
# Ensure that some special characters are escaped
def regexp_char(char, evasion):
char = str.replace(char, ' ', '\s')
char = str.replace(char, '.', '\.')
char = str.replace(char, '-', '\-')
char = str.replace(char, '+', r'''(?:\s|<|>).*''')
# Unix: "cat foo", "cat<foo", "cat>foo"
char = str.replace(char, '@', r'''(?:[\s,;]|\.|/|<|>).*''')
# Windows: "more foo", "more,foo", "more;foo", "more.com", "more/e",
# "more<foo", "more>foo"
return char
# Insert these sequences between characters to prevent evasion.
# This emulates the relevant parts of t:cmdLine.
evasions = {
'unix': r'''[\\\\'\"]*''',
'windows': r'''[\"\^]*''',
}
# Parse arguments
if len(sys.argv) <= 1 or not sys.argv[1] in evasions:
print(sys.argv[0] + ' unix|windows [infile]')
sys.exit(1)
evasion = evasions[sys.argv[1]]
del sys.argv[1]
# Process lines from input file, or if not specified, standard input
for line in fileinput.input():
line = line.rstrip('\n')
line = line.split('#')[0]
if line != '':
print(regexp_str(line, evasion))
| #!/usr/bin/env python
#
# Convert a word list to a list of regexps usable by Regexp::Assemble.
#
# Examples:
# cat regexp-932100.txt | ./regexp-cmdline.py unix | ./regexp-assemble.pl
# cat regexp-932110.txt | ./regexp-cmdline.py windows | ./regexp-assemble.pl
# cat regexp-932150.txt | ./regexp-cmdline.py unix | ./regexp-assemble.pl
#
# Refer to rule 932100, 932110, 932150 for documentation.
#
import fileinput, string, sys
# Convert a single line to regexp format, and insert anti-cmdline
# evasions between characters.
def regexp_str(str, evasion):
# By convention, if the line starts with ' char, copy the rest
# verbatim.
if str[0] == "'":
return str[1:]
result = ''
for i, char in enumerate(str):
if i > 0:
result += evasion
result += regexp_char(char, evasion)
return result
# Ensure that some special characters are escaped
def regexp_char(char, evasion):
char = string.replace(char, ' ', '\s')
char = string.replace(char, '.', '\.')
char = string.replace(char, '-', '\-')
char = string.replace(char, '+', r'''(?:\s|<|>).*''')
# Unix: "cat foo", "cat<foo", "cat>foo"
char = string.replace(char, '@', r'''(?:[\s,;]|\.|/|<|>).*''')
# Windows: "more foo", "more,foo", "more;foo", "more.com", "more/e",
# "more<foo", "more>foo"
return char
# Insert these sequences between characters to prevent evasion.
# This emulates the relevant parts of t:cmdLine.
evasions = {
'unix': r'''[\\\\'\"]*''',
'windows': r'''[\"\^]*''',
}
# Parse arguments
if len(sys.argv) <= 1 or not sys.argv[1] in evasions:
print sys.argv[0] + ' unix|windows [infile]'
sys.exit(1)
evasion = evasions[sys.argv[1]]
del sys.argv[1]
# Process lines from input file, or if not specified, standard input
for line in fileinput.input():
line = line.rstrip('\n')
line = line.split('#')[0]
if line != '':
print regexp_str(line, evasion)
| apache-2.0 | Python |
388895522ffc4f817759005a08c6bb6be1dafc30 | Increment version number | jbittel/django-mama-cas,jbittel/django-mama-cas,orbitvu/django-mama-cas,orbitvu/django-mama-cas | mama_cas/__init__.py | mama_cas/__init__.py | __version_info__ = (1, 2, 0)
__version__ = '.'.join([str(v) for v in __version_info__])
| __version_info__ = (1, 1, 1)
__version__ = '.'.join([str(v) for v in __version_info__])
| bsd-3-clause | Python |
8f355bd81a4f3f12c98375e1b2ca1bfa55737cac | Bump version | renalreg/cornflake | cornflake/__init__.py | cornflake/__init__.py | __version__ = '0.2.2'
| __version__ = '0.2.1'
| mit | Python |
1602f256dedf098345930b9af4228fc32d1c43d9 | add set_rtld_flags to __init__.py | tbenthompson/cppimport,tbenthompson/cppimport,tbenthompson/cppimport | cppimport/__init__.py | cppimport/__init__.py | from cppimport.config import set_quiet, force_rebuild, file_exts, turn_off_strict_prototypes, set_rtld_flags
from cppimport.importer import imp, imp_from_filepath
from cppimport.templating import setup_pybind11
from cppimport.importer import imp as cppimport
| from cppimport.config import set_quiet, force_rebuild, file_exts, turn_off_strict_prototypes
from cppimport.importer import imp, imp_from_filepath
from cppimport.templating import setup_pybind11
from cppimport.importer import imp as cppimport
| mit | Python |
92ad206b84dff9f02d1c855232d1489ced9cbdf3 | Use re instead of shlex.split to find executable | thaim/ansible,thaim/ansible | lib/ansible/runner/action_plugins/raw.py | lib/ansible/runner/action_plugins/raw.py | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import re
import ansible.constants as C
from ansible import utils
from ansible import errors
from ansible.runner.return_data import ReturnData
class ActionModule(object):
NEEDS_TMPPATH = False
def __init__(self, runner):
self.runner = runner
def run(self, conn, tmp, module_name, module_args, inject):
executable = None
# From library/command, keep in sync
r = re.compile(r'(^|\s)(executable)=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)\s|$)')
for m in r.finditer(module_args):
v = m.group(4).replace("\\", "")
if m.group(2) == "executable":
executable = v
module_args = r.sub("", module_args)
return ReturnData(conn=conn,
result=self.runner._low_level_exec_command(conn, module_args, tmp, sudoable=True, executable=executable)
)
| # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import shlex
import ansible.constants as C
from ansible import utils
from ansible import errors
from ansible.runner.return_data import ReturnData
class ActionModule(object):
NEEDS_TMPPATH = False
def __init__(self, runner):
self.runner = runner
def run(self, conn, tmp, module_name, module_args, inject):
executable = None
args = []
for arg in shlex.split(module_args.encode("utf-8")):
if arg.startswith('executable='):
executable = arg.split('=', 1)[1]
else:
args.append(arg)
module_args = ' '.join(args)
return ReturnData(conn=conn,
result=self.runner._low_level_exec_command(conn, module_args, tmp, sudoable=True, executable=executable)
)
| mit | Python |
fb9591c4a2801bfe5f5380c3e33aa44a25db3591 | Add absolute URLs to form and question admin | cschwede/django-customforms | customforms/models.py | customforms/models.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.db import models
class Form(models.Model):
title = models.CharField(_("Title"), max_length=255)
def __unicode__(self):
return u'%s' % self.title
class Meta:
ordering = ('title', )
def get_absolute_url(self):
return reverse('customforms.views.view_form', args=[str(self.id)])
class Question(models.Model):
form = models.ForeignKey(Form)
title = models.CharField(
_("Title"), max_length=255, default=_("Question Title"))
help_text = models.TextField(blank=True, null=True)
CHOICES = [
('C', _('Checkbox')),
('R', _('Radio')),
('S', _('Select')),
('T', _('Text')),
]
question_type = models.CharField(
max_length=1, choices=CHOICES, default="T")
required = models.BooleanField(default=False)
position = models.PositiveIntegerField(default=0)
def __unicode__(self):
return u'%s' % (self.title, )
class Meta:
ordering = ('form', 'position', )
def get_absolute_url(self):
return reverse('customforms.views.view_form', args=[str(self.form.id)])
class Choice(models.Model):
question = models.ForeignKey(Question)
title = models.CharField(max_length=200,)
position = models.PositiveIntegerField(default=0)
class Meta:
ordering = ('position', )
def __unicode__(self):
return u'%s' % (self.title, )
| #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext as _
from django.db import models
class Form(models.Model):
title = models.CharField(_("Title"), max_length=255)
def __unicode__(self):
return u'%s' % self.title
class Meta:
ordering = ('title', )
class Question(models.Model):
form = models.ForeignKey(Form)
title = models.CharField(
_("Title"), max_length=255, default=_("Question Title"))
help_text = models.TextField(blank=True, null=True)
CHOICES = [
('C', _('Checkbox')),
('R', _('Radio')),
('S', _('Select')),
('T', _('Text')),
]
question_type = models.CharField(
max_length=1, choices=CHOICES, default="T")
required = models.BooleanField(default=False)
position = models.PositiveIntegerField(default=0)
def __unicode__(self):
return u'%s' % (self.title, )
class Meta:
ordering = ('form', 'position', )
class Choice(models.Model):
question = models.ForeignKey(Question)
title = models.CharField(max_length=200,)
position = models.PositiveIntegerField(default=0)
class Meta:
ordering = ('position', )
def __unicode__(self):
return u'%s' % (self.title, )
| apache-2.0 | Python |
5a1f72de985b9c6bccafa0b2e9f21be9abbc77ca | Add log-level CLI option | praekeltfoundation/certbot,praekeltfoundation/certbot | marathon_acme/cli.py | marathon_acme/cli.py | import argparse
import sys
def main(raw_args=sys.argv[1:]):
"""
A tool to automatically request, renew and distribute Let's Encrypt
certificates for apps running on Marathon and served by marathon-lb.
"""
parser = argparse.ArgumentParser(
description='Automatically manage ACME certificates for Marathon apps')
parser.add_argument('-a', '--acme',
help='The address for the ACME Directory Resource '
'(default: %(default)s)',
default=(
'https://acme-v01.api.letsencrypt.org/directory'))
parser.add_argument('-m', '--marathon',
help='The address for the Marathon HTTP API (default: '
'%(default)s)',
default='http://marathon.mesos:8080')
parser.add_argument('-l', '--lb', nargs='+',
help='The address for the marathon-lb HTTP API '
'(default: %(default)s)',
default='http://marathon-lb.marathon.mesos:9090')
parser.add_argument('-g', '--group',
help='The marathon-lb group to issue certificates for '
'(default: %(default)s)',
default='external')
parser.add_argument('--log-level',
help='The minimum severity level to log messages at '
'(default: %(default)s)',
choices=['debug', 'info', 'warn', 'error', 'critical'],
default='info'),
parser.add_argument('storage-dir',
help='Path to directory for storing certificates')
args = parser.parse_args(raw_args) # noqa
if __name__ == '__main__':
main()
| import argparse
import sys
def main(raw_args=sys.argv[1:]):
"""
A tool to automatically request, renew and distribute Let's Encrypt
certificates for apps running on Marathon and served by marathon-lb.
"""
parser = argparse.ArgumentParser(
description='Automatically manage ACME certificates for Marathon apps')
parser.add_argument('-a', '--acme',
help='The address for the ACME Directory Resource '
'(default: %(default)s)',
default=(
'https://acme-v01.api.letsencrypt.org/directory'))
parser.add_argument('-m', '--marathon',
help='The address for the Marathon HTTP API (default: '
'%(default)s)',
default='http://marathon.mesos:8080')
parser.add_argument('-l', '--lb', nargs='+',
help='The address for the marathon-lb HTTP API '
'(default: %(default)s)',
default='http://marathon-lb.marathon.mesos:9090')
parser.add_argument('-g', '--group',
help='The marathon-lb group to issue certificates for '
'(default: %(default)s)',
default='external')
parser.add_argument('storage-dir',
help='Path to directory for storing certificates')
args = parser.parse_args(raw_args) # noqa
if __name__ == '__main__':
main()
| mit | Python |
1b6ab75db3619ba4e686ee504a954c5391598c34 | fix logging | ponty/psidialogs,ponty/psidialogs,ponty/psidialogs | psidialogs/backend_api.py | psidialogs/backend_api.py | from psidialogs.mixins import AllMixin
from psidialogs.backendloader import BackendLoader
import logging
log = logging.getLogger(__name__)
def opendialog(funcname, argdict):
for (k, v) in argdict.items():
if v is None:
argdict[k] = ""
log.debug(funcname)
log.debug(argdict)
b = BackendLoader().selected()
f = b.__class__.__dict__.get(funcname)
if not f:
class Backend(b.__class__, AllMixin):
pass
b = Backend()
f = AllMixin.__dict__.get(funcname)
return f(b, argdict)
| from psidialogs.mixins import AllMixin
from psidialogs.backendloader import BackendLoader
import logging
def opendialog(funcname, argdict):
for (k, v) in argdict.items():
if v is None:
argdict[k] = ""
logging.debug(funcname)
logging.debug(argdict)
b = BackendLoader().selected()
f = b.__class__.__dict__.get(funcname)
if not f:
class Backend(b.__class__, AllMixin):
pass
b = Backend()
f = AllMixin.__dict__.get(funcname)
return f(b, argdict)
| bsd-2-clause | Python |
854b8102518f102308d545603cd93e0c9945c4b9 | Bump version | timxx/gitc,timxx/gitc | qgitc/version.py | qgitc/version.py | # -*- coding: utf-8 -*-
VERSION_MAJOR = 2
VERSION_MINOR = 0
VERSION_PATCH = 1
VERSION = "{}.{}.{}".format(VERSION_MAJOR,
VERSION_MINOR,
VERSION_PATCH)
| # -*- coding: utf-8 -*-
VERSION_MAJOR = 2
VERSION_MINOR = 0
VERSION_PATCH = 0
VERSION = "{}.{}.{}".format(VERSION_MAJOR,
VERSION_MINOR,
VERSION_PATCH)
| apache-2.0 | Python |
b208e1ed4ad8bda465ac0cb5d83e5f64a81556c0 | manage when a user changes their mind | battlemidget/juju-layer-node | reactive/node.py | reactive/node.py | from charms.reactive import (
hook,
set_state,
remove_state,
main,
when_not,
)
from charmhelpers.core import (
hookenv,
unitdata,
)
from charms import apt
config = hookenv.config()
kv = unitdata.kv()
@when_not('nodejs.available')
def install_nodejs():
""" Installs defined node runtime
Emits:
nodejs.available: Emitted once the runtime has been installed
"""
hookenv.status_set('maintenance', 'installing Node.js')
kv.set('nodejs.url', config.get('install_sources'))
kv.set('nodejs.key', config.get('install_keys'))
apt.queue_install(['nodejs'])
hookenv.status_set('active', 'node.js is ready')
set_state('nodejs.available')
@hook('config-changed')
def version_check():
url = config.get('install_sources')
key = config.get('install_keys')
if url != kv.get('nodejs.url') or key != kv.get('nodejs.key'):
apt.purge(['nodejs'])
remove_state('nodejs.available')
if __name__ == "__main__":
main()
| from charms.reactive import (
hook,
set_state,
remove_state,
main,
when_not,
)
from charmhelpers.core import (
hookenv,
unitdata,
)
from charms import apt
config = hookenv.config()
kv = unitdata.kv()
@when_not('nodejs.available')
def install_nodejs():
""" Installs defined node runtime
Emits:
nodejs.available: Emitted once the runtime has been installed
"""
hookenv.status_set('maintenance', 'installing Node.js')
kv.set('nodejs.url', config.get('install_sources'))
kv.set('nodejs.key', config.get('install_keys'))
apt.queue_install(['nodejs'])
hookenv.status_set('active', 'node.js is ready')
set_state('nodejs.available')
if __name__ == "__main__":
main()
| mit | Python |
f3359b7ad48fbbbbed30160e748d6c663b6fcdbd | fix #392 | ysekky/GPy,dhhjx880713/GPy,befelix/GPy,SheffieldML/GPy,SheffieldML/GPy,dhhjx880713/GPy,esiivola/GPYgradients,esiivola/GPYgradients,dhhjx880713/GPy,mikecroucher/GPy,befelix/GPy,SheffieldML/GPy,mikecroucher/GPy,ysekky/GPy,befelix/GPy,ysekky/GPy,esiivola/GPYgradients,SheffieldML/GPy,befelix/GPy,dhhjx880713/GPy,mikecroucher/GPy,mikecroucher/GPy,esiivola/GPYgradients,ysekky/GPy | GPy/mappings/__init__.py | GPy/mappings/__init__.py | # Copyright (c) 2013, 2014 GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .kernel import Kernel
from .linear import Linear
from .mlp import MLP
from .additive import Additive
from .compound import Compound
from .constant import Constant
from .identity import Identity
from .piecewise_linear import PiecewiseLinear
| # Copyright (c) 2013, 2014 GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .kernel import Kernel
from .linear import Linear
from .mlp import MLP
from .additive import Additive
from .compound import Compound
from .constant import Constant
| bsd-3-clause | Python |
4926dd72c831960c7d6167f1ec73a663506aee82 | Fix wrong variable name. | sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator | Lib/encodings/charmap.py | Lib/encodings/charmap.py | """ Generic Python Character Mapping Codec.
Use this codec directly rather than through the automatic
conversion mechanisms supplied by unicode() and .encode().
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.charmap_encode
decode = codecs.charmap_decode
class StreamWriter(Codec,codecs.StreamWriter):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamWriter.__init__(self,stream,errors)
self.mapping = mapping
def encode(self,input,errors='strict'):
return Codec.encode(input,errors,self.mapping)
class StreamReader(Codec,codecs.StreamReader):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamReader.__init__(self,stream,errors)
self.mapping = mapping
def decode(self,input,errors='strict'):
return Codec.decode(input,errors,self.mapping)
### encodings module API
def getregentry():
return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
| """ Generic Python Character Mapping Codec.
Use this codec directly rather than through the automatic
conversion mechanisms supplied by unicode() and .encode().
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.charmap_encode
decode = codecs.charmap_decode
class StreamWriter(Codec,codecs.StreamWriter):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamWriter.__init__(self,stream,errors)
self.mapping = mapping
def encode(self,input,errors='strict'):
return Codec.encode(input,errors,self.mapping)
class StreamReader(Codec,codecs.StreamReader):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamReader.__init__(self,strict,errors)
self.mapping = mapping
def decode(self,input,errors='strict'):
return Codec.decode(input,errors,self.mapping)
### encodings module API
def getregentry():
return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
| mit | Python |
10f31cc96c776453be941ed0010cf4e88233c975 | update swallow_argv test with prefix-matching | ipython/ipython,ipython/ipython | IPython/kernel/tests/test_launcher.py | IPython/kernel/tests/test_launcher.py | """Tests for kernel utility functions
Authors
-------
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2011, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib imports
from unittest import TestCase
# Third-party imports
import nose.tools as nt
# Our own imports
from IPython.testing import decorators as dec
from IPython.kernel.launcher import swallow_argv
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
@dec.parametric
def test_swallow_argv():
tests = [
# expected , argv , aliases, flags
(['-a', '5'], ['-a', '5'], None, None),
(['5'], ['-a', '5'], None, ['a']),
([], ['-a', '5'], ['a'], None),
([], ['-a', '5'], ['a'], ['a']),
([], ['--foo'], None, ['foo']),
([], ['--foo'], ['foobar'], []),
([], ['--foo', '5'], ['foo'], []),
([], ['--foo=5'], ['foo'], []),
(['--foo=5'], ['--foo=5'], [], ['foo']),
(['5'], ['--foo', '5'], [], ['foo']),
(['bar'], ['--foo', '5', 'bar'], ['foo'], ['foo']),
(['bar'], ['--foo=5', 'bar'], ['foo'], ['foo']),
(['5','bar'], ['--foo', '5', 'bar'], None, ['foo']),
(['bar'], ['--foo', '5', 'bar'], ['foo'], None),
(['bar'], ['--foo=5', 'bar'], ['foo'], None),
]
for expected, argv, aliases, flags in tests:
stripped = swallow_argv(argv, aliases=aliases, flags=flags)
message = '\n'.join(['',
"argv: %r" % argv,
"aliases: %r" % aliases,
"flags : %r" % flags,
"expected : %r" % expected,
"returned : %r" % stripped,
])
yield nt.assert_equal(expected, stripped, message)
| """Tests for kernel utility functions
Authors
-------
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2011, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib imports
from unittest import TestCase
# Third-party imports
import nose.tools as nt
# Our own imports
from IPython.testing import decorators as dec
from IPython.kernel.launcher import swallow_argv
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
@dec.parametric
def test_swallow_argv():
tests = [
# expected , argv , aliases, flags
(['-a', '5'], ['-a', '5'], None, None),
(['5'], ['-a', '5'], None, ['a']),
([], ['-a', '5'], ['a'], None),
([], ['-a', '5'], ['a'], ['a']),
([], ['--foo'], None, ['foo']),
(['--foo'], ['--foo'], ['foobar'], []),
([], ['--foo', '5'], ['foo'], []),
([], ['--foo=5'], ['foo'], []),
(['--foo=5'], ['--foo=5'], [], ['foo']),
(['5'], ['--foo', '5'], [], ['foo']),
(['bar'], ['--foo', '5', 'bar'], ['foo'], ['foo']),
(['bar'], ['--foo=5', 'bar'], ['foo'], ['foo']),
(['5','bar'], ['--foo', '5', 'bar'], None, ['foo']),
(['bar'], ['--foo', '5', 'bar'], ['foo'], None),
(['bar'], ['--foo=5', 'bar'], ['foo'], None),
]
for expected, argv, aliases, flags in tests:
stripped = swallow_argv(argv, aliases=aliases, flags=flags)
message = '\n'.join(['',
"argv: %r" % argv,
"aliases: %r" % aliases,
"flags : %r" % flags,
"expected : %r" % expected,
"returned : %r" % stripped,
])
yield nt.assert_equal(expected, stripped, message)
| bsd-3-clause | Python |
eea4914e2ac082ad1fecc6131cbfea52f9565139 | update dev version after 0.39.0 tag [ci skip] | desihub/desitarget,desihub/desitarget | py/desitarget/_version.py | py/desitarget/_version.py | __version__ = '0.39.0.dev3998'
| __version__ = '0.39.0'
| bsd-3-clause | Python |
b1a839e46a1c197fbd091a97fee0dea6f6f0b660 | bump dev version after 0.7.0 tag | desihub/desitarget,desihub/desitarget | py/desitarget/_version.py | py/desitarget/_version.py | __version__ = '0.7.0.dev406'
| __version__ = '0.7.0'
| bsd-3-clause | Python |
482d9fe0cfd1d729316530b684e058b8f08cc4b4 | Add meme font | The-Penultimate-Defenestrator/memefarm | memefarm/__init__.py | memefarm/__init__.py | """
memefarm - generate an effectively unlimited number of memes by blindly
combining random images and words.
"""
# Dependencies
from PIL import Image, ImageDraw, ImageFont
import random
# Internal modules
import imagesearch
import wordgen
# GLOBALS
commonwords = wordgen.getWords() # Common english words
memefont = ImageFont.truetype("Impact")
class memefarm(object):
""" A 'meme farm' capabale of generating memes. """
def __init__(self, words=commonwords):
self.words = words
def word(self):
""" Get a random word from the words with which the """
return random.choice(self.words)
def phrase(self, length=(3, 6)):
""" Create a random sentence, given an acceptable range for numbers of
number of words"""
wordcount = random.randint(*length)
return ' '.join([self.word() for _ in range(wordcount)])
def image(self):
""" Get a random image by searching for a random word """
search = self.word()
return imagesearch.getImage(search)
if __name__ == "__main__":
# Tests
mf = memefarm() # Make a memefarm
print(mf.phrase()) # Print a random sentence
i = mf.image() # Find a random image
print(i.searchterm) # Show the search term used
i.show() # Show the image
| """
memefarm - generate an effectively unlimited number of memes by blindly
combining random images and words.
"""
# Dependencies
from PIL import Image
import random
# Internal modules
import imagesearch
import wordgen
# Load words
commonwords = wordgen.getWords()
class memefarm(object):
""" A 'meme farm' capabale of generating memes. """
def __init__(self, words=commonwords):
self.words = words
def word(self):
""" Get a random word from the words with which the """
return random.choice(self.words)
def phrase(self, length=(3, 6)):
""" Create a random sentence, given an acceptable range for numbers of
number of words"""
wordcount = random.randint(*length)
return ' '.join([self.word() for _ in range(wordcount)])
def image(self):
""" Get a random image by searching for a random word """
search = self.word()
return imagesearch.getImage(search)
if __name__ == "__main__":
# Tests
mf = memefarm() # Make a memefarm
print(mf.phrase()) # Print a random sentence
i = mf.image() # Find a random image
print(i.searchterm) # Show the search term used
i.show() # Show the image
| mit | Python |
d6ff777c7fb3f645c021da1319bb5d78d13aa9db | Fix python siphashing to match c implementation | janLo/automation_mesh,janLo/automation_mesh,janLo/automation_mesh | meshnet/interface.py | meshnet/interface.py | import serial
import struct
from siphashc import siphash
def _hash(key: bytes, sender: int, receiver: int, msg_type: int, data: bytes):
packed_data = struct.pack(">hhB", sender, receiver, msg_type) + data
return struct.pack(">Q", siphash(key, packed_data))
class SerialMessage(object):
def __init__(self):
pass
def serialize(self):
pass
class Connection(object):
def __init__(self, device):
self._device = device
self._conn = None
def connect(self):
self._conn = serial.Serial(self._device, 115200)
| import serial
import struct
from siphashc import siphash
def _hash(key: str, sender: int, receiver: int, msg_type: int, data: bytes):
packed_data = struct.pack(">h>hBs", sender, receiver, msg_type, data)
return struct.pack("Q", siphash(key, packed_data))
class SerialMessage(object):
def __init__(self):
pass
def serialize(self):
pass
class Connection(object):
def __init__(self, device):
self._device = device
self._conn = None
def connect(self):
self._conn = serial.Serial(self._device, 115200)
| bsd-3-clause | Python |
6ae6db2b7dbb8ed2226cdb8d969329019bbe2993 | add restart | mabotech/mabo_sup | py/mabo_sup/controller.py | py/mabo_sup/controller.py |
"""restart app/service"""
import subprocess
class Controller(object):
"""class"""
def __init__(self):
pass
def listen(self):
"""listen restart command"""
pass
def alert(self):
"""send mail"""
pass
def restart(service):
"""restart"""
#cmd /c "net stop "Service Name" & sc start "Service Name"" ?
cmd = "net stop %s" % (service)
proc1 = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
output = proc1.communicate()[0]
print(output)
cmd = "net start %s" % (service)
proc2 = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
output = proc2.communicate()[0]
print(output)
def main():
"""test"""
service = "_nginx_1.7.0"
restart(service)
if __name__ == "__main__":
main() |
"""restart app/service"""
class Controller(object):
def __init__(self):
pass
| mit | Python |
9d6ad3b56d078143ddbbfec0674a6a0cb7f2030f | Convert result of optimization to a tuple | cigroup-ol/metaopt,cigroup-ol/metaopt,cigroup-ol/metaopt | metaopt/core/main.py | metaopt/core/main.py | # -*- coding: utf-8 -*-
from __future__ import division, print_function, with_statement
from threading import Timer
from metaopt.core.returnspec import ReturnSpec
from metaopt.invoker.multiprocess import MultiProcessInvoker
from metaopt.invoker.pluggable import PluggableInvoker
from metaopt.optimizer.saes import SAESOptimizer
from metaopt.util.stoppable import StoppedException
def custom_optimize(f, invoker, param_spec=None, return_spec=None, timeout=None,
optimizer=SAESOptimizer()):
"""
Optimize the given objective function using the specified invoker.
:param f: Objective function
:param invoker: Invoker
:param timeout: Available time for optimization (in seconds)
:param optimizer: Optimizer
"""
invoker.f = f
try:
invoker.param_spec = param_spec or f.param_spec
except AttributeError:
raise NoParamSpecError()
try:
invoker.return_spec = return_spec or f.return_spec
except AttributeError:
invoker.return_spec = ReturnSpec(f)
if timeout is not None:
Timer(timeout, invoker.stop).start()
result = optimizer.optimize(invoker, param_spec=invoker.param_spec,
return_spec=invoker.return_spec)
try:
invoker.stop()
except StoppedException:
pass
return tuple(result)
def optimize(f, param_spec=None, return_spec=None, timeout=None, plugins=[],
optimizer=SAESOptimizer()):
"""
Optimize the given objective function.
:param f: Objective function
:param timeout: Available time for optimization (in seconds)
:param plugins: List of plugins
:param optimizer: Optimizer
"""
invoker = PluggableInvoker(MultiProcessInvoker(), plugins=plugins)
return custom_optimize(f, invoker, param_spec, return_spec, timeout,
optimizer)
class NoParamSpecError(Exception):
"""The error that occurs when no ParamSpec object is provided"""
pass
| # -*- coding: utf-8 -*-
from __future__ import division, print_function, with_statement
from threading import Timer
from metaopt.core.returnspec import ReturnSpec
from metaopt.invoker.multiprocess import MultiProcessInvoker
from metaopt.invoker.pluggable import PluggableInvoker
from metaopt.optimizer.saes import SAESOptimizer
from metaopt.util.stoppable import StoppedException
def custom_optimize(f, invoker, param_spec=None, return_spec=None, timeout=None,
optimizer=SAESOptimizer()):
"""
Optimize the given objective function using the specified invoker.
:param f: Objective function
:param invoker: Invoker
:param timeout: Available time for optimization (in seconds)
:param optimizer: Optimizer
"""
invoker.f = f
try:
invoker.param_spec = param_spec or f.param_spec
except AttributeError:
raise NoParamSpecError()
try:
invoker.return_spec = return_spec or f.return_spec
except AttributeError:
invoker.return_spec = ReturnSpec(f)
if timeout is not None:
Timer(timeout, invoker.stop).start()
result = optimizer.optimize(invoker, param_spec=invoker.param_spec,
return_spec=invoker.return_spec)
try:
invoker.stop()
except StoppedException:
pass
return result
def optimize(f, param_spec=None, return_spec=None, timeout=None, plugins=[],
optimizer=SAESOptimizer()):
"""
Optimize the given objective function.
:param f: Objective function
:param timeout: Available time for optimization (in seconds)
:param plugins: List of plugins
:param optimizer: Optimizer
"""
invoker = PluggableInvoker(MultiProcessInvoker(), plugins=plugins)
return custom_optimize(f, invoker, param_spec, return_spec, timeout,
optimizer)
class NoParamSpecError(Exception):
"""The error that occurs when no ParamSpec object is provided"""
pass
| bsd-3-clause | Python |
7f314ac89ef5dac18afac30f9ad86f6bf47b630b | Remove unused module level variables | alexandermendes/pybossa-discourse | pybossa_discourse/view.py | pybossa_discourse/view.py | # -*- coding: utf8 -*-
"""Views module for pybossa-discourse."""
from flask import Blueprint, request, url_for, flash, redirect
from flask import current_app as app
from flask.ext.login import logout_user, current_user
def index():
"""Attempt to sign in via SSO then redirect to Discourse."""
discourse_sso = app.extensions['discourse']['sso']
try:
url = discourse_sso.signin()
except AttributeError as e:
flash('Access Denied: {}'.format(str(e)), 'error')
return redirect(url_for('home.home'))
return redirect(url)
def oauth_authorized():
"""Authorise a Discourse login."""
discourse_sso = app.extensions['discourse']['sso']
sso = request.args.get('sso')
sig = request.args.get('sig')
if current_user.is_anonymous():
next_url = url_for('discourse.oauth_authorized', sso=sso, sig=sig)
return redirect(url_for('account.signin', next=next_url))
try:
url = discourse_sso.validate(sso, sig)
except (ValueError, AttributeError) as e:
flash('Access Denied: {0}'.format(str(e)), 'error')
return redirect(url_for('home.home'))
return redirect(url)
def signout():
"""Signout the current user from both PyBossa and Discourse."""
discourse_client = app.extensions['discourse']['client']
if not current_user.is_anonymous():
try:
discourse_client.log_out(current_user)
except (ValueError, AttributeError) as e:
msg = 'Discourse Logout Failed: {0}'.format(str(e))
flash(msg, 'error')
logout_user()
flash('You are now signed out', 'success')
return redirect(url_for('home.home'))
| # -*- coding: utf8 -*-
"""Views module for pybossa-discourse."""
from flask import Blueprint, request, url_for, flash, redirect
from flask import current_app as app
from flask.ext.login import logout_user, current_user
discourse_sso = app.extensions['discourse']['sso']
discourse_client = app.extensions['discourse']['client']
def index():
"""Attempt to sign in via SSO then redirect to Discourse."""
discourse_sso = app.extensions['discourse']['sso']
try:
url = discourse_sso.signin()
except AttributeError as e:
flash('Access Denied: {}'.format(str(e)), 'error')
return redirect(url_for('home.home'))
return redirect(url)
def oauth_authorized():
"""Authorise a Discourse login."""
discourse_sso = app.extensions['discourse']['sso']
sso = request.args.get('sso')
sig = request.args.get('sig')
if current_user.is_anonymous():
next_url = url_for('discourse.oauth_authorized', sso=sso, sig=sig)
return redirect(url_for('account.signin', next=next_url))
try:
url = discourse_sso.validate(sso, sig)
except (ValueError, AttributeError) as e:
flash('Access Denied: {0}'.format(str(e)), 'error')
return redirect(url_for('home.home'))
return redirect(url)
def signout():
"""Signout the current user from both PyBossa and Discourse."""
discourse_client = app.extensions['discourse']['client']
if not current_user.is_anonymous():
try:
discourse_client.log_out(current_user)
except (ValueError, AttributeError) as e:
msg = 'Discourse Logout Failed: {0}'.format(str(e))
flash(msg, 'error')
logout_user()
flash('You are now signed out', 'success')
return redirect(url_for('home.home'))
| bsd-3-clause | Python |
1fe9dbdbe1eaf02a9e53dfe5ce890358296f4efb | Increment version for release | pycroscopy/pycroscopy | pycroscopy/__version__.py | pycroscopy/__version__.py | version = '0.60.2'
time = '2018-07-30 14:04:51'
| version = '0.60.1'
time = '2018-06-19 09:41:25'
| mit | Python |
25dd1a2579195021900d641eddcc084c00084b25 | use numpy random | jameshicks/pydigree,jameshicks/pydigree | pydigree/recombination.py | pydigree/recombination.py | #!/usr/bin/env python
from array import array
from bisect import bisect_left
import numpy as np
def recombine(chr1, chr2, map):
newchrom = _recombine_haldane(chr1, chr2, map)
if isinstance(chr1, array) and isinstance(chr2, array):
if chr1.typecode != chr2.typecode:
raise ValueError('Chromosomes have two different typecodes!')
newchrom = array(chr1.typecode, newchrom)
return newchrom
def _recombine_haldane(chr1, chr2, map):
# The map is sorted list, and the last item will always be largest.
maxmap = map[-1]
nmark = len(map)
newchrom = []
# Randomly pick a chromosome to start from
# np.random.randint works on a half open interval, so the upper bound
# specified is 2. We'll get zeros and ones out of it.
flipped = np.random.randint(0,2)
last_crossover_index = 0
crossover_position = 0
while True:
# Get from the next chromosome
flipped = not flipped
c = chr1 if flipped else chr2
# Find the next crossover point
# np.random.exponential is parameterized with the RECIPROCAL of the
# rate parameter. With random.expovariate I would have used (0.01),
# here I supply 100 as an argument.
crossover_position += np.random.exponential(100)
if crossover_position > maxmap:
# We've reached the end of our journey here.
newchrom.extend(c[last_crossover_index:])
break
# Find the next crossover point in the chromosome by binary search
nextidx = bisect_left(map, crossover_position, last_crossover_index, nmark)
newchrom.extend(c[last_crossover_index:nextidx])
# Get ready to do it all over again
last_crossover_index = nextidx
return newchrom | #!/usr/bin/env python
import random
from array import array
from bisect import bisect_left
import numpy as np
def recombine(chr1, chr2, map):
newchrom = _recombine_haldane(chr1, chr2, map)
if isinstance(chr1, array) and isinstance(chr2, array):
if chr1.typecode != chr2.typecode:
raise ValueError('Chromosomes have two different typecodes!')
newchrom = array(chr1.typecode, newchrom)
return newchrom
def _recombine_haldane(chr1, chr2, map):
# The map is sorted list, and the last item will always be largest.
maxmap = map[-1]
nmark = len(map)
newchrom = []
# Randomly pick a chromosome to start from
flipped = random.choice((True, False))
last_crossover_index = 0
crossover_position = 0
while True:
# Get from the next chromosome
flipped = not flipped
c = chr1 if flipped else chr2
# Find the next crossover point
# np.random.exponential is parameterized with the RECIPROCAL of the
# rate parameter. With random.expovariate I would have used (0.01),
# here I supply 100 as an argument.
crossover_position += np.random.exponential(100)
if crossover_position > maxmap:
# We've reached the end of our journey here.
newchrom.extend(c[last_crossover_index:])
break
# Find the next crossover point in the chromosome by binary search
nextidx = bisect_left(map, crossover_position, last_crossover_index, nmark)
newchrom.extend(c[last_crossover_index:nextidx])
# Get ready to do it all over again
last_crossover_index = nextidx
return newchrom | apache-2.0 | Python |
8a29430d0254d4e5e9b13db5369d7bc2883a7ca7 | Update pylsy_test.py | bcho/Pylsy,muteness/Pylsy,bcho/Pylsy,huiyi1990/Pylsy,gnithin/Pylsy,gnithin/Pylsy,huiyi1990/Pylsy,muteness/Pylsy | pylsy/tests/pylsy_test.py | pylsy/tests/pylsy_test.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from pylsy.pylsy import pylsytable
class PylsyTableTests(unittest.TestCase):
def setUp(self):
attributes = ["name", "age"]
self.table = pylsytable(attributes)
def tearDown(self):
self.table = None
def testCreateTable(self):
name = ["a", "b"]
self.table.add_data("name", name)
age = [1, 2]
self.table.add_data("age", age)
correct_file = open('correct.out', 'r')
correctPrint = correct_file.read()
try:
# import io
# from contextlib import redirect_stdout
# with io.StringIO() as buf, redirect_stdout(buf):
# print(self.table,end='')
output = self.table.__str__()
self.assertEqual(output, correctPrint)
except ImportError:
import sys
f_handler = open('test.out', 'w')
sys.stdout = f_handler
self.table.create_table()
f_handler.close()
f_handler = open('test.out', 'r')
self.assertEqual(f_handler.read(), correctPrint)
if __name__ == '__main__':
unittest.main()
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
import sys
sys.path.append('..')
from pylsy import pylsytable
class PylsyTableTests(unittest.TestCase):
def setUp(self):
attributes = ["name", "age"]
self.table = pylsytable(attributes)
def tearDown(self):
self.table = None
def testCreateTable(self):
name = ["a", "b"]
self.table.add_data("name", name)
age = [1, 2]
self.table.add_data("age", age)
correct_file = open('correct.out', 'r')
correctPrint = correct_file.read()
try:
# import io
# from contextlib import redirect_stdout
# with io.StringIO() as buf, redirect_stdout(buf):
# print(self.table,end='')
output = self.table.__str__()
self.assertEqual(output, correctPrint)
except ImportError:
import sys
f_handler = open('test.out', 'w')
sys.stdout = f_handler
self.table.create_table()
f_handler.close()
f_handler = open('test.out', 'r')
self.assertEqual(f_handler.read(), correctPrint)
if __name__ == '__main__':
unittest.main()
| mit | Python |
0cabd9f72f902a993d52bbae5a9f45580254a44f | return values for testcase | mhalder/pyproject,mhalder/pyproject | pyproject/simplemodule.py | pyproject/simplemodule.py | # -*- coding: utf-8 -*-
#
# Copyright 2012 Martin Halder <martin.halder@gmail.com>
#
# This file is part of PyProject.
#
# PyProject is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# PyProject is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyProject. If not, see <http://www.gnu.org/licenses/>.
"""This is a very simple module.
:module: simplemodule
:moduleauthor: Martin Halder <martin.halder@gmail.com>
"""
class SimpleClass:
"""Demonstrate class docstrings."""
def __init__(self, one=1, two=2):
"""Set default attribute values only.
This class does nothing except setting the
default attributes.
:param one: first useless parameter
:param two: second one
:returns: nothing
:raises: nothing
"""
self.one = one
self.two = two
def do_something(self, thing=1):
"""simple useless function.
:param thing: some thing
:returns: 'three'
:raises: your salary
"""
return 'test'
| # -*- coding: utf-8 -*-
#
# Copyright 2012 Martin Halder <martin.halder@gmail.com>
#
# This file is part of PyProject.
#
# PyProject is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# PyProject is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyProject. If not, see <http://www.gnu.org/licenses/>.
"""This is a very simple module.
:module: simplemodule
:moduleauthor: Martin Halder <martin.halder@gmail.com>
"""
class SimpleClass:
"""Demonstrate class docstrings."""
def __init__(self, one=1, two=2):
"""Set default attribute values only.
This class does nothing except setting the
default attributes.
:param one: first useless parameter
:param two: second one
:returns: nothing
:raises: nothing
"""
self.one = one
self.two = two
def do_something(self, thing=1):
"""simple useless function.
:param thing: some thing
:returns: never
:raises: your salary
"""
pass
| bsd-2-clause | Python |
a392e6fce8cbdf62390bd559f5b8c729c0ba1110 | Revert "Raise a warning when failing run_decoding_tests." | google/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,Tiger66639/skia-buildbot,Tiger66639/skia-buildbot,Tiger66639/skia-buildbot | slave/skia_slave_scripts/run_decoding_tests.py | slave/skia_slave_scripts/run_decoding_tests.py | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Run the Skia skimage executable. """
from build_step import BuildStep
import sys
class RunDecodingTests(BuildStep):
def _Run(self):
cmd = ['-r', self._device_dirs.SKImageInDir()]
if self._gm_image_subdir is not None:
expectations_name = self._gm_image_subdir + '.json'
# Read expectations, which were downloaded/copied to the device.
expectations_file = self.DevicePathJoin(
self._device_dirs.SKImageExpectedDir(),
expectations_name)
if self.DevicePathExists(expectations_file):
cmd.extend(['--readExpectationsPath', expectations_file])
# Write the expectations file, in case any did not match.
output_expectations_file = self.DevicePathJoin(
self._device_dirs.SKImageOutDir(),
expectations_name)
cmd.extend(['--createExpectationsPath', output_expectations_file])
# Draw any mismatches to the same folder as the output json.
cmd.extend(['--mismatchPath', self._device_dirs.SKImageOutDir()])
self.RunFlavoredCmd('skimage', cmd)
if '__main__' == __name__:
sys.exit(BuildStep.RunBuildStep(RunDecodingTests))
| #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Run the Skia skimage executable. """
from build_step import BuildStep, BuildStepWarning
import sys
class RunDecodingTests(BuildStep):
def _Run(self):
cmd = ['-r', self._device_dirs.SKImageInDir()]
if self._gm_image_subdir is not None:
expectations_name = self._gm_image_subdir + '.json'
# Read expectations, which were downloaded/copied to the device.
expectations_file = self.DevicePathJoin(
self._device_dirs.SKImageExpectedDir(),
expectations_name)
if self.DevicePathExists(expectations_file):
cmd.extend(['--readExpectationsPath', expectations_file])
# Write the expectations file, in case any did not match.
output_expectations_file = self.DevicePathJoin(
self._device_dirs.SKImageOutDir(),
expectations_name)
cmd.extend(['--createExpectationsPath', output_expectations_file])
# Draw any mismatches to the same folder as the output json.
cmd.extend(['--mismatchPath', self._device_dirs.SKImageOutDir()])
try:
self.RunFlavoredCmd('skimage', cmd)
except Exception as e:
print "========= Exception in run_decoding_tests ========"
raise BuildStepWarning(e)
if '__main__' == __name__:
sys.exit(BuildStep.RunBuildStep(RunDecodingTests))
| bsd-3-clause | Python |
36d99edb6747fea58bb1effbfc306af09939779e | use transaction | shownotes/snotes20-restapi,shownotes/snotes20-restapi | snotes20/management/commands/importexternal.py | snotes20/management/commands/importexternal.py | import logging
import datetime
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from snotes20.datasources import sources
import snotes20.models as models
logger = logging.getLogger(__name__)
def import_from_source(source):
logger.info("downloading Podcasts")
podcasts = source.get_podcasts()
logger.info("downloading Episodes")
yesterday = (datetime.date.today() - datetime.timedelta(1))
tomorrow = (datetime.date.today() + datetime.timedelta(1))
episodes = source.get_episodes(yesterday, tomorrow)
with transaction.atomic():
logger.info("importing Podcasts")
import_thing(source, podcasts, models.Podcast.objects)
logger.info("importing Episodes")
import_thing(source, episodes, models.Episode.objects.filter(document=None))
def import_thing(source, data, oqry):
for entry in data:
qry = oqry.filter(source_id=entry.source_id).filter(source=source.shortname)
if qry.exists():
dbentry = qry.get()
logger.debug("updating {}".format(dbentry))
else:
logger.debug("creating {}".format(entry))
entry.save()
class Command(BaseCommand):
args = ''
help = 'Updates all external datasources'
def handle(self, *args, **options):
logger.info("importing from external sources")
for source in sources:
logger.info("import from {}".format(source.name))
import_from_source(source)
| from django.core.management.base import BaseCommand, CommandError
import logging
import datetime
from snotes20.datasources import sources
import snotes20.models as models
logger = logging.getLogger(__name__)
def import_from_source(source):
logger.info("importing Podcasts")
podcasts = source.get_podcasts()
import_thing(source, podcasts, models.Podcast.objects)
logger.info("importing Episodes")
yesterday = (datetime.date.today() - datetime.timedelta(1))
tomorrow = (datetime.date.today() + datetime.timedelta(1))
episodes = source.get_episodes(yesterday, tomorrow)
import_thing(source, episodes, models.Episode.objects.filter(document=None))
def import_thing(source, data, oqry):
for entry in data:
qry = oqry.filter(source_id=entry.source_id).filter(source=source.shortname)
if qry.exists():
dbentry = qry.get()
logger.debug("updating {}".format(dbentry))
else:
logger.debug("creating {}".format(entry))
entry.save()
class Command(BaseCommand):
args = ''
help = 'Updates all external datasources'
def handle(self, *args, **options):
logger.info("importing from external sources")
for source in sources:
logger.info("import from {}".format(source.name))
import_from_source(source)
| agpl-3.0 | Python |
f828b79ed30edcc87837d820e6b5712de16a3118 | bump version -> 2.2.0.dev3 | omry/omegaconf | omegaconf/version.py | omegaconf/version.py | import sys # pragma: no cover
__version__ = "2.2.0.dev3"
msg = """OmegaConf 2.0 and above is compatible with Python 3.6 and newer.
You have the following options:
1. Upgrade to Python 3.6 or newer.
This is highly recommended. new features will not be added to OmegaConf 1.4.
2. Continue using OmegaConf 1.4:
You can pip install 'OmegaConf<1.5' to do that.
"""
if sys.version_info < (3, 6):
raise ImportError(msg) # pragma: no cover
| import sys # pragma: no cover
__version__ = "2.2.0.dev2"
msg = """OmegaConf 2.0 and above is compatible with Python 3.6 and newer.
You have the following options:
1. Upgrade to Python 3.6 or newer.
This is highly recommended. new features will not be added to OmegaConf 1.4.
2. Continue using OmegaConf 1.4:
You can pip install 'OmegaConf<1.5' to do that.
"""
if sys.version_info < (3, 6):
raise ImportError(msg) # pragma: no cover
| bsd-3-clause | Python |
4c5be9a3b99ad47a1492a2b3481498273ec3a6cd | bump repo version | omry/omegaconf | omegaconf/version.py | omegaconf/version.py | import sys # pragma: no cover
__version__ = "2.0.0rc24"
msg = """OmegaConf 2.0 and above is compatible with Python 3.6 and newer.
You have the following options:
1. Upgrade to Python 3.6 or newer.
This is highly recommended. new features will not be added to OmegaConf 1.4.
2. Continue using OmegaConf 1.4:
You can pip install 'OmegaConf<1.5' to do that.
"""
if sys.version_info < (3, 6):
raise ImportError(msg) # pragma: no cover
| import sys # pragma: no cover
__version__ = "2.0.0rc23"
msg = """OmegaConf 2.0 and above is compatible with Python 3.6 and newer.
You have the following options:
1. Upgrade to Python 3.6 or newer.
This is highly recommended. new features will not be added to OmegaConf 1.4.
2. Continue using OmegaConf 1.4:
You can pip install 'OmegaConf<1.5' to do that.
"""
if sys.version_info < (3, 6):
raise ImportError(msg) # pragma: no cover
| bsd-3-clause | Python |
3e8d1efc04d28b11f0ca8b4c14f84b4fdd3b88ba | Fix import error | DarkmatterVale/regex4dummies | regex4dummies/__init__.py | regex4dummies/__init__.py | from regex4dummies import regex4dummies
from toolkit import Toolkit
| from regex4dummies import regex4dummies
| mit | Python |
2bcda9a2e80386cbd0f269b422ad9fa693a5b4de | Add "Python :: 3" classifier to reprozip-jupyter | ViDA-NYU/reprozip,ViDA-NYU/reprozip,ViDA-NYU/reprozip,ViDA-NYU/reprozip,ViDA-NYU/reprozip | reprozip-jupyter/setup.py | reprozip-jupyter/setup.py | import io
import os
from setuptools import setup
# pip workaround
os.chdir(os.path.abspath(os.path.dirname(__file__)))
# Need to specify encoding for PY3, which has the worst unicode handling ever
with io.open('README.rst', encoding='utf-8') as fp:
description = fp.read()
setup(name='reprozip-jupyter',
version='0.4',
packages=['reprozip_jupyter'],
package_data={'reprozip_jupyter': ['notebook-extension.js']},
entry_points={
'console_scripts': [
'reprozip-jupyter = reprozip_jupyter.main:main']},
install_requires=['rpaths',
'notebook', 'jupyter_client', 'nbformat', 'nbconvert',
'reprounzip>=1.0'],
description="Jupyter Notebook tracing/reproduction using ReproZip",
author="Remi Rampin, Fernando Chirigati, Dennis Shasha, Juliana Freire",
author_email='reprozip-users@vgc.poly.edu',
maintainer="Remi Rampin",
maintainer_email='remirampin@gmail.com',
url='https://www.reprozip.org/',
project_urls={
'Homepage': 'https://github.com/ViDA-NYU/reprozip',
'Documentation': 'https://docs.reprozip.org/',
'Examples': 'https://examples.reprozip.org/',
'Say Thanks': 'https://saythanks.io/to/remram44',
'Source': 'https://github.com/ViDA-NYU/reprozip',
'Tracker': 'https://github.com/ViDA-NYU/reprozip/issues',
},
long_description=description,
license='BSD',
keywords=['reprozip', 'reprounzip', 'reproducibility', 'provenance',
'vida', 'nyu', 'jupyter', 'notebook'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering',
'Topic :: System :: Archiving'])
| import io
import os
from setuptools import setup
# pip workaround
os.chdir(os.path.abspath(os.path.dirname(__file__)))
# Need to specify encoding for PY3, which has the worst unicode handling ever
with io.open('README.rst', encoding='utf-8') as fp:
description = fp.read()
setup(name='reprozip-jupyter',
version='0.4',
packages=['reprozip_jupyter'],
package_data={'reprozip_jupyter': ['notebook-extension.js']},
entry_points={
'console_scripts': [
'reprozip-jupyter = reprozip_jupyter.main:main']},
install_requires=['rpaths',
'notebook', 'jupyter_client', 'nbformat', 'nbconvert',
'reprounzip>=1.0'],
description="Jupyter Notebook tracing/reproduction using ReproZip",
author="Remi Rampin, Fernando Chirigati, Dennis Shasha, Juliana Freire",
author_email='reprozip-users@vgc.poly.edu',
maintainer="Remi Rampin",
maintainer_email='remirampin@gmail.com',
url='https://www.reprozip.org/',
project_urls={
'Homepage': 'https://github.com/ViDA-NYU/reprozip',
'Documentation': 'https://docs.reprozip.org/',
'Examples': 'https://examples.reprozip.org/',
'Say Thanks': 'https://saythanks.io/to/remram44',
'Source': 'https://github.com/ViDA-NYU/reprozip',
'Tracker': 'https://github.com/ViDA-NYU/reprozip/issues',
},
long_description=description,
license='BSD',
keywords=['reprozip', 'reprounzip', 'reproducibility', 'provenance',
'vida', 'nyu', 'jupyter', 'notebook'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering',
'Topic :: System :: Archiving'])
| bsd-3-clause | Python |
0df3fe9b900fcfb96953195f4f5ed013cd4ebb55 | update version | bird-house/pywps-proxy,bird-house/twitcher,bird-house/pywps-proxy | twitcher/__init__.py | twitcher/__init__.py | import logging
logger = logging.getLogger(__name__)
__version__ = '0.3.3'
def main(global_config, **settings):
"""
This function returns a Pyramid WSGI application.
"""
from pyramid.config import Configurator
config = Configurator(settings=settings)
# include twitcher components
config.include('twitcher.config')
config.include('twitcher.frontpage')
config.include('twitcher.rpcinterface')
config.include('twitcher.owsproxy')
config.include('twitcher.wps')
# tweens/middleware
# TODO: maybe add tween for exception handling or use unknown_failure view
config.include('twitcher.tweens')
config.scan()
return config.make_wsgi_app()
| import logging
logger = logging.getLogger(__name__)
__version__ = '0.3.2'
def main(global_config, **settings):
"""
This function returns a Pyramid WSGI application.
"""
from pyramid.config import Configurator
config = Configurator(settings=settings)
# include twitcher components
config.include('twitcher.config')
config.include('twitcher.frontpage')
config.include('twitcher.rpcinterface')
config.include('twitcher.owsproxy')
config.include('twitcher.wps')
# tweens/middleware
# TODO: maybe add tween for exception handling or use unknown_failure view
config.include('twitcher.tweens')
config.scan()
return config.make_wsgi_app()
| apache-2.0 | Python |
e9af799900df8f3dd6f4b55f796e918c80a48b5a | Handle 'Interrupted system call' corectly in InputHookContext. | ddalex/python-prompt-toolkit,jonathanslenders/python-prompt-toolkit,jaseg/python-prompt-toolkit,amjith/python-prompt-toolkit,melund/python-prompt-toolkit,ALSchwalm/python-prompt-toolkit,niklasf/python-prompt-toolkit | prompt_toolkit/eventloop/inputhook.py | prompt_toolkit/eventloop/inputhook.py | """
Similar to `PyOS_InputHook` of the Python API. Some eventloops can have an
inputhook to allow easy integration with other event loops.
When the eventloop of prompt-toolkit is idle, it can call such a hook. This
hook can call another eventloop that runs for a short while, for instance to
keep a graphical user interface responsive.
It's the responsibility of this hook to exit when there is input ready.
There are two ways to detect when input is ready:
- Call the `input_is_ready` method periodically. Quit when this returns `True`.
- Add the `fileno` as a watch to the external eventloop. Quit when file descriptor
becomes readable. (But don't read from it.)
Note that this is not the same as checking for `sys.stdin.fileno()`. The
eventloop of prompt-toolkit allows thread-based executors, for example for
asynchronous autocompletion. When the completion for instance is ready, we
also want prompt-toolkit to gain control again in order to display that.
An alternative to using input hooks, is to create a custom `EventLoop` class that
controls everything.
"""
from __future__ import unicode_literals
import os
import threading
__all__ = (
'InputHookContext',
)
class InputHookContext(object):
"""
Given as a parameter to the inputhook.
"""
def __init__(self, inputhook):
assert callable(inputhook)
self.inputhook = inputhook
self._input_is_ready = None
self._r, self._w = os.pipe()
def input_is_ready(self):
"""
Return True when the input is ready.
"""
return self._input_is_ready(wait=False)
def fileno(self):
"""
File descriptor that will become ready when the event loop needs to go on.
"""
return self._r
def call_inputhook(self, input_is_ready_func):
"""
Call the inputhook. (Called by a prompt-toolkit eventloop.)
"""
self._input_is_ready = input_is_ready_func
# Start thread that activates this pipe when there is input to process.
def thread():
input_is_ready_func(wait=True)
os.write(self._w, b'x')
threading.Thread(target=thread).start()
# Call inputhook.
self.inputhook(self)
# Flush the read end of the pipe.
try:
os.read(self._r, 1024)
except OSError:
# This happens when the window resizes and a SIGWINCH was received.
# We get 'Error: [Errno 4] Interrupted system call'
# Just ignore.
pass
self._input_is_ready = None
def close(self):
"""
Clean up resources.
"""
if self._r:
os.close(self._r)
os.close(self._w)
self._r = self._w = None
| """
Similar to `PyOS_InputHook` of the Python API. Some eventloops can have an
inputhook to allow easy integration with other event loops.
When the eventloop of prompt-toolkit is idle, it can call such a hook. This
hook can call another eventloop that runs for a short while, for instance to
keep a graphical user interface responsive.
It's the responsibility of this hook to exit when there is input ready.
There are two ways to detect when input is ready:
- Call the `input_is_ready` method periodically. Quit when this returns `True`.
- Add the `fileno` as a watch to the external eventloop. Quit when file descriptor
becomes readable. (But don't read from it.)
Note that this is not the same as checking for `sys.stdin.fileno()`. The
eventloop of prompt-toolkit allows thread-based executors, for example for
asynchronous autocompletion. When the completion for instance is ready, we
also want prompt-toolkit to gain control again in order to display that.
An alternative to using input hooks, is to create a custom `EventLoop` class that
controls everything.
"""
from __future__ import unicode_literals
import os
import threading
__all__ = (
'InputHookContext',
)
class InputHookContext(object):
"""
Given as a parameter to the inputhook.
"""
def __init__(self, inputhook):
assert callable(inputhook)
self.inputhook = inputhook
self._input_is_ready = None
self._r, self._w = os.pipe()
def input_is_ready(self):
"""
Return True when the input is ready.
"""
return self._input_is_ready(wait=False)
def fileno(self):
"""
File descriptor that will become ready when the event loop needs to go on.
"""
return self._r
def call_inputhook(self, input_is_ready_func):
"""
Call the inputhook. (Called by a prompt-toolkit eventloop.)
"""
self._input_is_ready = input_is_ready_func
# Start thread that activates this pipe when there is input to process.
def thread():
input_is_ready_func(wait=True)
os.write(self._w, b'x')
threading.Thread(target=thread).start()
# Call inputhook.
self.inputhook(self)
# Flush the read end of the pipe.
os.read(self._r, 1024)
self._input_is_ready = None
def close(self):
"""
Clean up resources.
"""
if self._r:
os.close(self._r)
os.close(self._w)
self._r = self._w = None
| bsd-3-clause | Python |
b2bab786c4af3dcca7d35b1e6ecff8699e542ec4 | Add a pytest hook for creating the coverage data_file directory | jbeezley/girder,jbeezley/girder,girder/girder,kotfic/girder,jbeezley/girder,data-exp-lab/girder,Xarthisius/girder,data-exp-lab/girder,girder/girder,RafaelPalomar/girder,jbeezley/girder,girder/girder,kotfic/girder,manthey/girder,kotfic/girder,girder/girder,RafaelPalomar/girder,Xarthisius/girder,RafaelPalomar/girder,Xarthisius/girder,data-exp-lab/girder,manthey/girder,manthey/girder,RafaelPalomar/girder,data-exp-lab/girder,RafaelPalomar/girder,Kitware/girder,manthey/girder,data-exp-lab/girder,Xarthisius/girder,Kitware/girder,Xarthisius/girder,kotfic/girder,Kitware/girder,kotfic/girder,Kitware/girder | pytest_girder/pytest_girder/plugin.py | pytest_girder/pytest_girder/plugin.py | import os
from .fixtures import * # noqa
def pytest_configure(config):
"""
Create the necessary directories for coverage. This is necessary because neither coverage nor
pytest-cov have support for making the data_file directory before running.
"""
covPlugin = config.pluginmanager.get_plugin('_cov')
if covPlugin is not None:
covPluginConfig = covPlugin.cov_controller.cov.config
covDataFileDir = os.path.dirname(covPluginConfig.data_file)
try:
os.makedirs(covDataFileDir)
except OSError:
pass
def pytest_addoption(parser):
group = parser.getgroup('girder')
group.addoption('--mock-db', action='store_true', default=False,
help='Whether or not to mock the database using mongomock.')
group.addoption('--mongo-uri', action='store', default='mongodb://localhost:27017',
help=('The base URI to the MongoDB instance to use for database connections, '
'default is mongodb://localhost:27017'))
group.addoption('--drop-db', action='store', default='both',
choices=('both', 'pre', 'post', 'never'),
help='When to destroy testing databases, default is both '
'(before and after running tests)')
| from .fixtures import * # noqa
def pytest_addoption(parser):
group = parser.getgroup('girder')
group.addoption('--mock-db', action='store_true', default=False,
help='Whether or not to mock the database using mongomock.')
group.addoption('--mongo-uri', action='store', default='mongodb://localhost:27017',
help=('The base URI to the MongoDB instance to use for database connections, '
'default is mongodb://localhost:27017'))
group.addoption('--drop-db', action='store', default='both',
choices=('both', 'pre', 'post', 'never'),
help='When to destroy testing databases, default is both '
'(before and after running tests)')
| apache-2.0 | Python |
61799c59f5acddeb79248f5f2244b047cb737e29 | move get_posts and get_stream methods into User | davejlin/treehouse,davejlin/treehouse,davejlin/treehouse,davejlin/treehouse,davejlin/treehouse,davejlin/treehouse,davejlin/treehouse,davejlin/treehouse | python/flask/social-network/models.py | python/flask/social-network/models.py | import datetime
from flask_bcrypt import generate_password_hash
from flask_login import UserMixin
from peewee import *
DATABASE = SqliteDatabase('social.db')
class User(UserMixin, Model):
username = CharField(unique=True)
email = CharField(unique=True)
password = CharField(max_length=100)
joined_at = DateTimeField(default=datetime.datetime.now)
is_admin = BooleanField(default=False)
class Meta:
database = DATABASE
order_by = ('-joined_at',)
def get_posts(self):
return Post.select().where(Post.user == self)
def get_stream(self):
return Post.select().where(
(Post.user == self)
)
@classmethod
def create_user(cls, username, email, password, admin=False):
try:
cls.create(
username=username,
email=email,
password=generate_password_hash(password),
is_admin=admin)
except IntegrityError:
raise ValueError("User already exists")
def Post(Model):
timestamp = DateTimeField(default=datetime.datetime.now)
user = ForeignKeyField(
rel_model=User,
related_name='posts'
)
content = TextField()
class Meta:
database = DATABASE
order_by = ('-timestamp',)
def initialize():
DATABASE.connect()
DATABASE.create_tables([User], safe=True)
DATABASE.close() | import datetime
from flask_bcrypt import generate_password_hash
from flask_login import UserMixin
from peewee import *
DATABASE = SqliteDatabase('social.db')
class User(UserMixin, Model):
username = CharField(unique=True)
email = CharField(unique=True)
password = CharField(max_length=100)
joined_at = DateTimeField(default=datetime.datetime.now)
is_admin = BooleanField(default=False)
class Meta:
database = DATABASE
order_by = ('-joined_at',)
@classmethod
def create_user(cls, username, email, password, admin=False):
try:
cls.create(
username=username,
email=email,
password=generate_password_hash(password),
is_admin=admin)
except IntegrityError:
raise ValueError("User already exists")
def Post(Model):
timestamp = DateTimeField(default=datetime.datetime.now)
user = ForeignKeyField(
rel_model=User,
related_name='posts'
)
content = TextField()
class Meta:
database = DATABASE
order_by = ('-timestamp',)
def get_posts(self):
return Post.select().where(Post.user == self)
def get_stream(self):
return Post.select().where(
(Post.user == self)
)
def initialize():
DATABASE.connect()
DATABASE.create_tables([User], safe=True)
DATABASE.close() | unlicense | Python |
65330d63992fa2f8696308a06991cb9b031668b1 | Update curlCallee.py | mudragada/util-scripts | pythonworks/Curls/Curls/curlCallee.py | pythonworks/Curls/Curls/curlCallee.py | from curlRequester import sendCurlRequest
username = 'admin'
password = 'password'
instancesFile = 'instancesspreadout'
def BrowsePageServiceFlush():
path = 'url/?shouldInvokeMethod=flushAllBrowseCaches'
typeList = ['831','832','834']
curlOnSocketsFromFile(instancesFile, path , username, password, typeList)
def curlOnSocketsFromFile(inputFileName, path,username, password):
with open(inputFileName,'r') as fileName:
lines = fileName.read().splitlines()
for line in lines:
sendCurlRequest(line,path,username,password)
| __author__ = 'Krishna Mudragada'
from curlRequester import sendCurlRequest
username = 'krishna'
password = 'Eagles@123'
instancesFile = 'instancesspreadout'
def BrowsePageServiceFlush():
path = 'aeo/commerce/catalog/services/BrowsePageService/?shouldInvokeMethod=flushAllBrowseCaches'
typeList = ['831','832','834']
curlOnSocketsFromFile(instancesFile, path , username, password, typeList)
def curlOnSocketsFromFile(inputFileName, path,username, password):
with open(inputFileName,'r') as fileName:
lines = fileName.read().splitlines()
for line in lines:
sendCurlRequest(line,path,username,password)
| mit | Python |
8a26c2f9fefdf76aaffaf8ee276f810e10935796 | Implement EvidenceRequest.__eq__() | sherlocke/pywatson | pywatson/question/evidence_request.py | pywatson/question/evidence_request.py | class EvidenceRequest(object):
"""Include this with a Question to request evidence from Watson"""
def __init__(self, items=3, profile=False):
self.items = items
self.profile = profile
def __eq__(self, other):
"""Return True iff self is equivalent to other
:param other: an EvidenceRequest
:return: True or False
"""
if self is other:
return True
if not isinstance(other, EvidenceRequest):
return False
if self.items != other.items:
return False
if self.profile != other.profile:
return False
return True
| class EvidenceRequest(object):
"""Include this with a Question to request evidence from Watson"""
def __init__(self, items=3, profile=False):
self.items = items
self.profile = profile
def __eq__(self, other):
return False
| mit | Python |
b1e6f3eacccb5e575ac47b6a40809f4671510672 | Drop Python 2 support in split_level utility function | rsinger86/drf-flex-fields | rest_flex_fields/utils.py | rest_flex_fields/utils.py | from collections.abc import Iterable
def is_expanded(request, key):
""" Examines request object to return boolean of whether
passed field is expanded.
"""
expand = request.query_params.get("expand", "")
expand_fields = []
for e in expand.split(","):
expand_fields.extend([e for e in e.split(".")])
return "~all" in expand_fields or key in expand_fields
def split_levels(fields):
"""
Convert dot-notation such as ['a', 'a.b', 'a.d', 'c'] into
current-level fields ['a', 'c'] and next-level fields
{'a': ['b', 'd']}.
"""
first_level_fields = []
next_level_fields = {}
if not fields:
return first_level_fields, next_level_fields
assert (
isinstance(fields, Iterable)
), "`fields` must be iterable (e.g. list, tuple, or generator)"
if isinstance(fields, str):
fields = [a.strip() for a in fields.split(",") if a.strip()]
for e in fields:
if "." in e:
first_level, next_level = e.split(".", 1)
first_level_fields.append(first_level)
next_level_fields.setdefault(first_level, []).append(next_level)
else:
first_level_fields.append(e)
first_level_fields = list(set(first_level_fields))
return first_level_fields, next_level_fields
| try:
# Python 3
from collections.abc import Iterable
string_types = (str,)
except ImportError:
# Python 2
from collections import Iterable
string_types = (str, unicode)
def is_expanded(request, key):
""" Examines request object to return boolean of whether
passed field is expanded.
"""
expand = request.query_params.get("expand", "")
expand_fields = []
for e in expand.split(","):
expand_fields.extend([e for e in e.split(".")])
return "~all" in expand_fields or key in expand_fields
def split_levels(fields):
"""
Convert dot-notation such as ['a', 'a.b', 'a.d', 'c'] into
current-level fields ['a', 'c'] and next-level fields
{'a': ['b', 'd']}.
"""
first_level_fields = []
next_level_fields = {}
if not fields:
return first_level_fields, next_level_fields
assert (
isinstance(fields, Iterable)
), "`fields` must be iterable (e.g. list, tuple, or generator)"
if isinstance(fields, string_types):
fields = [a.strip() for a in fields.split(",") if a.strip()]
for e in fields:
if "." in e:
first_level, next_level = e.split(".", 1)
first_level_fields.append(first_level)
next_level_fields.setdefault(first_level, []).append(next_level)
else:
first_level_fields.append(e)
first_level_fields = list(set(first_level_fields))
return first_level_fields, next_level_fields
| mit | Python |
40980253f3ea8dd03e8f14d25f9098c8c910d989 | Update validate-binary-search-tree.py | kamyu104/LeetCode,kamyu104/LeetCode,githubutilities/LeetCode,githubutilities/LeetCode,kamyu104/LeetCode,githubutilities/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,jaredkoontz/leetcode,githubutilities/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,yiwen-luo/LeetCode,jaredkoontz/leetcode,jaredkoontz/leetcode,yiwen-luo/LeetCode,jaredkoontz/leetcode,githubutilities/LeetCode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015 | Python/validate-binary-search-tree.py | Python/validate-binary-search-tree.py | # Time: O(n)
# Space: O(1)
#
# Given a binary tree, determine if it is a valid binary search tree (BST).
#
# Assume a BST is defined as follows:
#
# The left subtree of a node contains only nodes with keys less than the node's key.
# The right subtree of a node contains only nodes with keys greater than the node's key.
# Both the left and right subtrees must also be binary search trees.
#
# Definition for a binary tree node
class TreeNode:
    """A binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.val = x
        # Children start unset; callers wire them up after construction.
        self.left = self.right = None
# Morris (threaded) traversal solution: validates the BST with an inorder
# walk that uses O(1) extra space by temporarily linking each node's
# inorder predecessor back to it.
class Solution:
    # @param root, a tree node
    # @return a boolean -- True iff the tree is a strictly-increasing BST
    def isValidBST(self, root):
        last, node = None, root
        while node:
            if node.left:
                # Find the rightmost node of the left subtree
                # (the inorder predecessor of `node`).
                pred = node.left
                while pred.right and pred.right is not node:
                    pred = pred.right
                if pred.right is None:
                    # First visit: thread predecessor to us, descend left.
                    pred.right = node
                    node = node.left
                    continue
                # Thread already exists: left subtree is fully visited.
                # NOTE: on early failure the temporary thread is left in
                # place, mirroring the original implementation.
                if last and last.val >= node.val:
                    return False
                pred.right = None  # remove the thread
                last, node = node, node.right
            else:
                # No left subtree: visit this node directly.
                if last and last.val >= node.val:
                    return False
                last, node = node, node.right
        return True
# Time: O(n)
# Space: O(logn)
class Solution2:
    """Recursive range-check validation.

    Each node must lie strictly inside an (low, high) interval that is
    tightened as the recursion descends; depth is O(tree height).
    """

    # @param root, a tree node
    # @return a boolean
    def isValidBST(self, root):
        return self.isValidBSTRecu(root, float("-inf"), float("inf"))

    def isValidBSTRecu(self, root, low, high):
        # An empty subtree is trivially a valid BST.
        if root is None:
            return True
        # The node's value must fall strictly between the bounds...
        if not (low < root.val < high):
            return False
        # ...and each subtree must obey the correspondingly tightened bounds.
        return (self.isValidBSTRecu(root.left, low, root.val)
                and self.isValidBSTRecu(root.right, root.val, high))
if __name__ == "__main__":
    # Smoke test: a minimal valid BST (1 < 2 < 3) should print True.
    # Fixed: `print` as a statement is a SyntaxError on Python 3; the
    # function-call form works on both Python 2 and 3.
    root = TreeNode(2)
    root.left = TreeNode(1)
    root.right = TreeNode(3)
    print(Solution().isValidBST(root))
| # Time: O(n)
# Space: O(logn)
#
# Given a binary tree, determine if it is a valid binary search tree (BST).
#
# Assume a BST is defined as follows:
#
# The left subtree of a node contains only nodes with keys less than the node's key.
# The right subtree of a node contains only nodes with keys greater than the node's key.
# Both the left and right subtrees must also be binary search trees.
#
# Definition for a binary tree node
class TreeNode:
    """A binary-tree node: a value plus left/right child links."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
    # @param root, a tree node
    # @return a boolean
    def isValidBST(self, root):
        """Validate by propagating (low, high) bounds down the tree."""
        return self.isValidBSTRecu(root, float("-inf"), float("inf"))

    def isValidBSTRecu(self, root, low, high):
        # Empty subtrees are valid; otherwise the value must fall strictly
        # within (low, high) and both subtrees must obey tightened bounds.
        if root is None:
            return True
        return (low < root.val < high
                and self.isValidBSTRecu(root.left, low, root.val)
                and self.isValidBSTRecu(root.right, root.val, high))
if __name__ == "__main__":
    # Smoke test: a minimal valid BST (1 < 2 < 3) should print True.
    # Fixed: `print` as a statement is a SyntaxError on Python 3; the
    # function-call form works on both Python 2 and 3.
    root = TreeNode(2)
    root.left = TreeNode(1)
    root.right = TreeNode(3)
    print(Solution().isValidBST(root))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.