commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
49d28814c498d1698c61b8eeae3c3e3e019a09c3 | add recipe3 scrap | scrap/recipe3.py | scrap/recipe3.py | import scrapy
class Recipe3Spider(scrapy.Spider):
    """Crawl cuisineaz.com search results for a fixed set of query terms
    and scrape every recipe page found."""

    name = "recipe3"
    download_delay = 0.5  # be polite: half a second between requests

    # One search URL per query term.
    start_urls = [
        "http://www.cuisineaz.com/recettes/recherche_v2.aspx?recherche={}".format(r)
        for r in [
            'bases',
            'aperitifs',
            'entrees',
            'plats',
            'desserts',
            'accompagnements',
            'recettes-pas-cheres',
            'viandes',
            'poissons',
            'legumes',
            'fruits',
            'fromages',
            'repas',
            'cher',
            'farine',
            'sucre',
            'facile',
        ]
    ]

    def parse(self, response):
        """Queue the next results page (if any), then every recipe link."""
        next_href = response.css('.pagination-next a::attr(href)').extract_first()
        if next_href:
            yield scrapy.Request(response.urljoin(next_href.strip()), callback=self.parse)
        for href in response.css('#titleRecette a::attr(href)').extract():
            yield scrapy.Request(response.urljoin(href.strip()), callback=self.parse_recipe)

    def parse_recipe(self, response):
        """Extract one item from a recipe detail page."""
        yield {
            'uri': response.url,
            'recipe': response.css('.recipe_main h1::text').extract_first(),
            'breadcrumb': [],
            'quantity': response.css('#ctl00_ContentPlaceHolder_LblRecetteNombre::text').extract_first(),
            'content': response.css('.recipe_ingredients ul').extract_first()
        }
| Python | 0.000373 | |
f486343277a94e511ea1e152ca6b69f12fd657a0 | Create droidgpspush.py | droidgpspush.py | droidgpspush.py | import androidhelper
import socket
import time
# Push the phone's location to a Raspberry Pi over TCP every 5 seconds.
# Python 2-era SL4A script: s.send(str) assumes str == bytes.
droid = androidhelper.Android()
port=12345
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("10.201.19.201",port)) #connecting to pi as client
droid.makeToast("Starting location fetch") #notify me
while True:
    location = droid.getLastKnownLocation().result
    # Prefer the network fix; fall back to GPS if absent.
    location = location.get('network', location.get('gps')) #fetch location
    data = str(location)
    print(data) #logging
    s.send(data) #send to server
    time.sleep(5) #wait for 5 seconds
| Python | 0 | |
fc0d54ff6d6b6ca91727c7aa0832f6c6dfc64967 | Add a prototype WinNT updater | rwho-update-winnt.py | rwho-update-winnt.py | #!/usr/bin/python
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Half of this hasn't been implemented yet.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
import ctypes as c
import socket as so
import win32api as api
#import win32con as con
import win32ts as ts
import win32security as sec
import json
from urllib import urlencode
import urllib2
from time import sleep
class WTS_INFO_CLASS():
    """Constants mirroring the native WTS_INFO_CLASS enumeration consumed by
    WTSQuerySessionInformation (wtsapi32.dll); each value selects which
    session attribute to query."""
    InitialProgram = 0
    ApplicationName = 1
    WorkingDirectory = 2
    OEMId = 3
    SessionId = 4
    UserName = 5
    WinStationName = 6
    DomainName = 7
    ConnectState = 8
    ClientBuildNumber = 9
    ClientName = 10
    ClientDirectory = 11
    ClientProductId = 12
    ClientHardwareId = 13
    ClientAddress = 14
    ClientDisplay = 15
    ClientProtocolType = 16
    IdleTime = 17
    LogonTime = 18
    IncomingBytes = 19
    OutgoingBytes = 20
    IncomingFrames = 21
    OutgoingFrames = 22
    ClientInfo = 23
    SessionInfo = 24
    SessionInfoEx = 25
    ConfigInfo = 26
    ValidationInfo = 27
    SessionAddressV4 = 28
    IsRemoteSession = 29
def _wtsapi_WTSQuerySessionInformation(hServer, sessionID, infoClass):
    """Thin ctypes wrapper around WTSQuerySessionInformationW.

    Returns a (buffer, byte_count) pair on success; returns None implicitly
    when the native call reports failure.

    NOTE(review): the returned buffer is never released with WTSFreeMemory,
    so each successful call appears to leak the allocation — confirm whether
    callers are expected to free it.
    """
    ppBuffer = c.c_int32()
    pBytesReturned = c.c_int32()
    # The W API returns nonzero on success.
    if c.windll.wtsapi32.WTSQuerySessionInformationW(
            c.c_int32(hServer), c.c_int32(sessionID), c.c_int32(infoClass),
            c.byref(ppBuffer), c.byref(pBytesReturned)):
        return (ppBuffer, pBytesReturned)
# Endpoint of the rwho aggregation server that receives the session reports.
SERVER_URL = "http://equal.cluenet.org/~grawity/rwho/server.php"
def get_sessions():
    """Yield one utmp-style dict per active, logged-in Terminal Services
    session on the local machine (Python 2; prints debug dumps as it goes)."""
    # Protocol map is currently only used by the commented-out "proto" field.
    protocols = {
        ts.WTS_PROTOCOL_TYPE_CONSOLE: "console",
        ts.WTS_PROTOCOL_TYPE_ICA: "citrix",
        ts.WTS_PROTOCOL_TYPE_RDP: "rdp",
    }
    hServer = ts.WTS_CURRENT_SERVER_HANDLE
    #hServer = ts.WTSOpenServer("digit.cluenet.org")
    # NOTE(review): curSessId is fetched but never used below.
    curSessId = ts.WTSGetActiveConsoleSessionId()
    for sess in ts.WTSEnumerateSessions(hServer):
        utent = {}
        id = sess["SessionId"]
        # Augment the enumeration record with per-session attributes.
        for key, const in {
            "User": ts.WTSUserName,
            "Address": ts.WTSClientAddress,
            "Client": ts.WTSClientName,
            "Protocol": ts.WTSClientProtocolType,
            #"XClient": 23, #ts.WTSClientInfo,
            #"XSession": 24, #ts.WTSSessionInfo,
        }.items():
            sess[key] = ts.WTSQuerySessionInformation(hServer, id, const)
        if not sess["User"]:
            # skip non-login sessions
            continue
        if sess["State"] != 0:
            # presumably 0 == WTSActive; skips disconnected/idle sessions — confirm
            continue
        userSid, userDomain, acctType = sec.LookupAccountName(None, sess["User"])
        userSidAuths = [userSid.GetSubAuthority(i) for i in range(userSid.GetSubAuthorityCount())]
        utent["user"] = sess["User"]
        # The SID's last sub-authority (the RID) stands in for a Unix uid.
        utent["uid"] = userSidAuths[-1]
        utent["host"] = ""
        utent["line"] = "%s/%s" % (sess["WinStationName"].lower(), id)
        utent["time"] = 0
        #utent["proto"] = protocols.get(sess["Protocol"], "unknown")
        # Debug dump of the raw session record and the derived utmp entry.
        print "="*79
        for k, v in sess.items():
            print "%-10s: %s" % (k, repr(v))
        print
        for k, v in utent.items():
            print "%-10s: %s" % (k, repr(v))
        yield utent
def upload(utmp):
    """POST the collected utmp entries to SERVER_URL as a form-encoded
    "put" action (Python 2 urllib2), then print the server's reply."""
    data = {
        "host": so.gethostname().lower(),
        "fqdn": so.getfqdn().lower(),
        "action": "put",
        "utmp": json.dumps(utmp),
    }
    resp = urllib2.urlopen(SERVER_URL, urlencode(data))
    print resp.read()
# One-shot run: collect the current sessions and push them to the server.
utmp = list(get_sessions())
upload(utmp)
| Python | 0 | |
6d25c1958a84eb1a6004ebadec6769511974cca4 | add basic rsa by request | basic-rsa/rsa.py | basic-rsa/rsa.py | def main():
e = int('3', 16)
n = int('64ac4671cb4401e906cd273a2ecbc679f55b879f0ecb25eefcb377ac724ee3b1', 16)
d = int('431d844bdcd801460488c4d17487d9a5ccc95698301d6ab2e218e4b575d52ea3', 16)
c = int('599f55a1b0520a19233c169b8c339f10695f9e61c92bd8fd3c17c8bba0d5677e', 16)
m = pow(c, d, n)
print(hex(m))
| Python | 0 | |
ad4471dfc4210e34b66c65293f71f7ba4936beba | Create arbitrage_algo.py | vnpy/app/algo_trading/algos/arbitrage_algo.py | vnpy/app/algo_trading/algos/arbitrage_algo.py | from vnpy.trader.constant import Direction
from vnpy.trader.object import TradeData, OrderData
from vnpy.trader.engine import BaseEngine
from vnpy.app.algo_trading import AlgoTemplate
class ArbitrageAlgo(AlgoTemplate):
    """Two-leg arbitrage algo: trades the active leg when the quoted spread
    to the passive leg breaches the configured bands, and hedges every
    active-leg fill on the passive leg to keep the net position flat."""

    display_name = "Arbitrage 套利"

    default_setting = {
        "active_vt_symbol": "",
        "passive_vt_symbol": "",
        "spread_up": 0.0,
        "spread_down": 0.0,
        "max_pos": 0,
        "interval": 0,
    }

    variables = [
        "timer_count",
        "active_vt_orderid",
        "passive_vt_orderid",
        "net_pos",
        "acum_pos"
    ]

    def __init__(
        self,
        algo_engine: BaseEngine,
        algo_name: str,
        setting: dict
    ):
        """Read settings, subscribe to both legs and publish initial state."""
        super().__init__(algo_engine, algo_name, setting)

        # Parameters
        self.active_vt_symbol = setting["active_vt_symbol"]
        self.passive_vt_symbol = setting["passive_vt_symbol"]
        self.spread_up = setting["spread_up"]
        self.spread_down = setting["spread_down"]
        self.max_pos = setting["max_pos"]
        self.interval = setting["interval"]

        # Variables
        self.active_vt_orderid = ""   # working order on the active leg
        self.passive_vt_orderid = ""  # working hedge order on the passive leg
        self.net_pos = 0              # combined net position; hedge target is 0
        self.acum_pos = 0             # accumulated active-leg position (capped by max_pos)
        self.timer_count = 0

        self.subscribe(self.active_vt_symbol)
        self.subscribe(self.passive_vt_symbol)

        self.put_parameters_event()
        self.put_variables_event()

    def on_stop(self):
        """Log algo shutdown."""
        self.write_log("停止算法")

    def on_order(self, order: OrderData):
        """Clear the cached order id once that order is no longer active."""
        if order.vt_symbol == self.active_vt_symbol:
            if not order.is_active():
                self.active_vt_orderid = ""
        elif order.vt_symbol == self.passive_vt_symbol:
            if not order.is_active():
                self.passive_vt_orderid = ""

        self.put_variables_event()

    def on_trade(self, trade: TradeData):
        """Update positions from a fill; hedge after active-leg fills."""
        # Update net position volume
        if trade.direction == Direction.LONG:
            self.net_pos += trade.volume
        else:
            self.net_pos -= trade.volume

        # Update active symbol position
        if trade.vt_symbol == self.active_vt_symbol:
            if trade.direction == Direction.LONG:
                self.acum_pos += trade.volume
            else:
                self.acum_pos -= trade.volume

        # Hedge if active symbol traded
        if trade.vt_symbol == self.active_vt_symbol:
            self.hedge()

        self.put_variables_event()

    def on_timer(self):
        """Once every ``interval`` timer ticks: cancel stale orders, hedge
        residual exposure, then quote the active leg if a band is breached."""
        self.timer_count += 1
        if self.timer_count < self.interval:
            self.put_variables_event()
            return
        self.timer_count = 0

        # Never stack orders: cancel anything still working before quoting.
        if self.active_vt_orderid or self.passive_vt_orderid:
            self.cancel_all()
            return

        # Flatten any leftover exposure before initiating a new round trip.
        if self.net_pos:
            self.hedge()
            return

        active_tick = self.get_tick(self.active_vt_symbol)
        passive_tick = self.get_tick(self.passive_vt_symbol)
        if not active_tick or not passive_tick:
            return

        # Calculate spread
        spread_bid_price = active_tick.bid_price_1 - passive_tick.ask_price_1
        spread_ask_price = active_tick.ask_price_1 - passive_tick.bid_price_1
        spread_bid_volume = min(active_tick.bid_volume_1, passive_tick.ask_volume_1)
        spread_ask_volume = min(active_tick.ask_volume_1, passive_tick.bid_volume_1)

        # Sell condition
        if spread_bid_price > self.spread_up:
            if self.acum_pos <= -self.max_pos:
                return
            else:
                self.active_vt_orderid = self.sell(
                    self.active_vt_symbol,
                    active_tick.bid_price_1,
                    spread_bid_volume
                )
        # Buy condition
        elif spread_ask_price < -self.spread_down:
            if self.acum_pos >= self.max_pos:
                return
            else:
                self.active_vt_orderid = self.buy(
                    self.active_vt_symbol,
                    active_tick.ask_price_1,
                    spread_ask_volume
                )
        self.put_variables_event()

    def hedge(self):
        """Flatten ``net_pos`` on the passive leg, crossing to book level 5
        to maximise the chance of an immediate fill."""
        tick = self.get_tick(self.passive_vt_symbol)
        volume = abs(self.net_pos)

        if self.net_pos > 0:
            self.passive_vt_orderid = self.sell(
                self.passive_vt_symbol,
                tick.bid_price_5,
                volume
            )
        elif self.net_pos < 0:
            self.passive_vt_orderid = self.buy(
                self.passive_vt_symbol,
                tick.ask_price_5,
                volume
            )
| Python | 0.000257 | |
6be3e0c5264ca2750a77ac1dbd4175502e51fd3c | Add argparse tests for ceph-deploy admin | ceph_deploy/tests/parser/test_admin.py | ceph_deploy/tests/parser/test_admin.py | import pytest
from ceph_deploy.cli import get_parser
class TestParserAdmin(object):
    """Argparse behaviour of the ``ceph-deploy admin`` subcommand."""

    def setup(self):
        self.parser = get_parser()

    def test_admin_help(self, capsys):
        # --help exits after printing usage to stdout.
        with pytest.raises(SystemExit):
            self.parser.parse_args(['admin', '--help'])
        out, err = capsys.readouterr()
        assert 'usage: ceph-deploy admin' in out
        assert 'positional arguments:' in out
        assert 'optional arguments:' in out

    def test_admin_host_required(self, capsys):
        # Omitting the host list is a usage error printed to stderr.
        with pytest.raises(SystemExit):
            self.parser.parse_args(['admin'])
        out, err = capsys.readouterr()
        assert "error: too few arguments" in err

    def test_admin_one_host(self):
        parsed = self.parser.parse_args(['admin', 'host1'])
        assert parsed.client == ['host1']

    def test_admin_multiple_hosts(self):
        hosts = ['host1', 'host2', 'host3']
        parsed = self.parser.parse_args(['admin'] + hosts)
        assert parsed.client == hosts
| Python | 0 | |
2ee5f1e3563e5a7104515adf74e41a8781fbcd9e | Create exercise5.py | exercise5.py | exercise5.py | # -- coding: utf-8 --
# "Learn Python the Hard Way" exercise 5: variables and %-formatting.
# Python 2 (print statements).
my_name = 'Zed A. Shaw'
my_age = 35 # not a lie
my_height = 74 # inches
my_weight = 180 # lbs
my_eyes = 'Blue'
my_teeth = 'White'
my_hair = 'Brown'
print "Let's talk about %s." % my_name
print "He's %d inches tall." % my_height
print "He's %d pounds heavy." % my_weight
print "Actually that's not too heavy."
print "He's got %s eyes and %s hair." % (my_eyes, my_hair)
print "His teeth are usually %s depending on the coffee." % my_teeth
# this line is tricky, try to get it exactly right
print "If I add %d, %d, and %d I get %d." % (
    my_age, my_height, my_weight, my_age + my_height + my_weight)
| Python | 0.000001 | |
1dbd94379662638639945bd58910f3a0e67bb61e | Create backend.py | backend.py | backend.py | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import requests
import urllib
import time
import os
import sys
import re
import lyrics as minilyrics
import services as s
if sys.platform == "win32":
import win32gui
elif sys.platform == "darwin":
import subprocess
else:
import subprocess
import dbus
# Lyrics services that support synced (.lrc style) lyrics.
# With Sync. Of course, there is one for now, but for the sake of
# make the code a little bit more cleaner, is declared.
services_list1 = [s._minilyrics]
# Without Sync.
services_list2 = [s._wikia, s._musixmatch, s._songmeanings, s._songlyrics, s._genius, s._versuri]

# Module-level state describing the track currently being looked up.
artist = ""
song = ""
url = ""
'''
current_service is used to store the current index of the list.
Useful to change the lyrics with the button "Next Lyric" if
the service returned a wrong song
'''
current_service = -1
def load_lyrics(artist, song, sync=False):
    """Try the lyrics services in turn until one returns lyrics.

    Returns a ``(lyrics, url, timed)`` tuple; ``timed`` is True only when a
    synced lyric came back from minilyrics.  Advances the module-level
    ``current_service`` index so repeated calls (the "Next Lyric" button)
    cycle through the remaining unsynced services.
    """
    error = "Error: Could not find lyrics."
    global current_service
    # Wrap around once every unsynced service has been tried.
    if current_service == len(services_list2)-1: current_service = -1
    if sync == True:
        lyrics, url, timed = s._minilyrics(artist, song)
        current_service = -1
    # Operator precedence: (sync and lyrics == error) or (not sync).
    if sync == True and lyrics == error or sync == False:
        timed = False
        for i in range (current_service+1, len(services_list2)):
            lyrics, url = services_list2[i](artist, song)
            current_service = i
            if lyrics != error:
                # NOTE(review): replace("&", "&") is a no-op as written;
                # presumably it was meant to unescape an HTML entity — confirm.
                lyrics = lyrics.replace("&", "&").replace("`", "'").strip()
                break
    #return "Error: Could not find lyrics." if the for loop doens't find any lyrics
    return(lyrics, url, timed)
def getlyrics(songname, sync=False):
    """Parse a Spotify window title ("Artist - Title[ - Extra][ / Part]")
    and fetch lyrics for it.

    Resets the module-level lookup state, extracts artist/title from the
    window title, strips any parenthesised suffix from the title, and
    delegates to load_lyrics().  Returns its (lyrics, url, timed) tuple.
    """
    global artist, song, url, current_service
    artist = ""
    song = ""
    url = ""
    current_service = -1
    if songname.count(" - ") == 1:
        artist, song = songname.rsplit(" - ", 1)
    if songname.count(" - ") == 2:
        artist, song, garbage = songname.rsplit(" - ", 2)
    if " / " in song:
        song, garbage = song.rsplit(" / ", 1)
    # Drop any parenthesised suffix, e.g. " (Remastered)".  Raw string:
    # ' \(...' in a plain literal is an invalid escape sequence and raises
    # a SyntaxWarning on modern Python (the matched pattern is unchanged).
    song = re.sub(r' \(.*?\)', '', song, flags=re.DOTALL)
    return load_lyrics(artist, song, sync)
def next_lyrics():
    """Fetch lyrics for the current track from the next service in the
    rotation (load_lyrics advances the module-level service index)."""
    return load_lyrics(artist, song)
def getwindowtitle():
    """Return the current Spotify window title as "Artist - Title".

    Platform-specific: win32gui on Windows, AppleScript on macOS, and
    MPRIS over D-Bus (plus xwininfo as a playing/paused probe) elsewhere.
    Returns "Spotify" when nothing is playing.
    """
    if sys.platform == "win32":
        spotify = win32gui.FindWindow('SpotifyMainWindow', None)
        windowname = win32gui.GetWindowText(spotify)
    elif sys.platform == "darwin":
        windowname = ''
        try:
            command = "osascript getCurrentSong.AppleScript"
            windowname = subprocess.check_output(["/bin/bash", "-c", command]).decode("utf-8")
        except Exception:
            pass
    else:
        windowname = ''
        session = dbus.SessionBus()
        spotifydbus = session.get_object("org.mpris.MediaPlayer2.spotify", "/org/mpris/MediaPlayer2")
        spotifyinterface = dbus.Interface(spotifydbus, "org.freedesktop.DBus.Properties")
        metadata = spotifyinterface.Get("org.mpris.MediaPlayer2.Player", "Metadata")
        try:
            # The X window title only contains " - " while a track is playing;
            # best-effort probe, any failure falls through to MPRIS metadata.
            command = "xwininfo -tree -root"
            windows = subprocess.check_output(["/bin/bash", "-c", command]).decode("utf-8")
            spotify = ''
            for line in windows.splitlines():
                if '("spotify" "Spotify")' in line:
                    if " - " in line:
                        spotify = line
                        break
            if spotify == '':
                windowname = 'Spotify'
        except Exception:
            pass
        if windowname != 'Spotify':
            windowname = "%s - %s" %(metadata['xesam:artist'][0], metadata['xesam:title'])
    # Normalise em-dash separators used by some locales.
    if "—" in windowname:
        windowname = windowname.replace("—", "-")
    # BUGFIX: previously windowname.strip("Spotify - ") stripped a character
    # SET from both ends (mangling titles such as "Sting - ..."); remove the
    # literal prefix instead.
    if windowname.startswith("Spotify - "):
        windowname = windowname[len("Spotify - "):]
    return(windowname)
def versioncheck():
    """Return True when the local version is current (or the check fails).

    Every network/parse error is deliberately swallowed so an offline
    client never blocks on the update check.
    """
    # BUGFIX: the module only does "import urllib"; urllib.request was
    # previously reachable solely because the requests package imports it
    # as a side effect.  Import the submodule explicitly.
    import urllib.request
    proxy = urllib.request.getproxies()
    try:
        currentversion = requests.get("https://raw.githubusercontent.com/fr31/spotifylyrics/master/currentversion", timeout=5, proxies=proxy).text
    except Exception:
        return(True)
    try:
        if version() >= float(currentversion):
            return(True)
        else:
            return(False)
    except Exception:
        return(True)
def version():
    """Return this client's version number."""
    return 1.14
def main():
    """Console entry point: poll the Spotify window title once a second
    and fetch fresh lyrics whenever the track changes."""
    if os.name == "nt":
        # Switch the Windows console codepage to UTF-8 so lyrics render.
        os.system("chcp 65001")
    def clear():
        # Clear the terminal in a platform-appropriate way.
        if os.name == "nt":
            os.system("cls")
        else:
            os.system("clear")
    clear()
    oldsongname = ""
    while True:
        songname = getwindowtitle()
        if oldsongname != songname:
            if songname != "Spotify":
                # Track changed: remember it and refresh the lyrics.
                oldsongname = songname
                clear()
                # print(songname+"\n")
                lyrics, url, timed = getlyrics(songname)
                # print(lyrics+"\n")
        time.sleep(1)

if __name__ == '__main__':
    main()
| Python | 0.000002 | |
2523d34d4f3e26a408c7ec0e43708efea77f03a9 | Add to support the chinese library | workflow/cndic_naver_search.py | workflow/cndic_naver_search.py | # Naver Search Workflow for Alfred 2
# Copyright (C) 2013 Jinuk Baek
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
from workflow import web, Workflow
def get_dictionary_data(word):
    """Query Naver's Chinese-dictionary autocomplete endpoint for *word*
    and return the decoded JSON payload."""
    params = {
        'q': word,
        '_callback': '',
        'q_enc': 'utf-8',
        'st': 11,
        'r_lt': '00',
        't_koreng': 1,
        'r_format': 'json',
        'r_enc': 'utf-8',
        'r_unicode': 0,
        'r_escape': 1,
    }
    response = web.get('http://ac.cndic.naver.com/ac2', params)
    response.raise_for_status()
    return response.json()
def main(wf):
    """Alfred workflow entry point: emit feedback items for wf.args[0]."""
    # NOTE(review): cgi.escape was removed in Python 3.8; this workflow
    # targets the Python 2 Alfred runtime — use html.escape if ever ported.
    import cgi;
    args = wf.args[0]
    # First item always offers a direct web search for the raw query.
    wf.add_item(title = 'Search Naver Cndic for \'%s\'' % args,
                autocomplete=args,
                arg=args,
                valid=True)
    def wrapper():
        return get_dictionary_data(args)
    # Cache autocomplete results per query for 10 minutes.
    res_json = wf.cached_data("cn_%s" % args, wrapper, max_age=600)
    for item in res_json['items']:
        for ltxt in item:
            if len(ltxt) > 0:
                txt = ltxt[0][0];
                rtxt = cgi.escape(ltxt[1][0]);
                wf.add_item(title = u"%s %s" % (txt, rtxt) ,
                            subtitle = 'Search Naver Cndic for \'%s\'' % txt,
                            autocomplete=txt,
                            arg=txt,
                            valid=True);
    wf.send_feedback()
if __name__ == '__main__':
    # Alfred invokes this script directly; run inside the Workflow harness.
    wf = Workflow()
    sys.exit(wf.run(main))
| Python | 0 | |
ac0e7cb6ff2885457ccbe9f7311489edf7c9406b | create train object utils | mozi/utils/train_object_utils.py | mozi/utils/train_object_utils.py | from __future__ import absolute_import
from __future__ import print_function
import matplotlib
# matplotlib.use('Agg')
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
from theano.compile.ops import as_op
from mozi.utils.progbar import Progbar
import tarfile, inspect, os
from six.moves.urllib.request import urlretrieve
floatX = theano.config.floatX
def split_list(tuple_list):
    """Split a list of 2-tuples into two parallel lists.

    Example: [('a', 1), ('b', 2)] -> (['a', 'b'], [1, 2])
    """
    firsts = [pair[0] for pair in tuple_list]
    seconds = [pair[1] for pair in tuple_list]
    return firsts, seconds
def generate_shared_list(ls):
    """Return a list of zero-initialised Theano shared variables, one per
    element of *ls* (only len(ls) matters; element values are ignored).
    """
    # Iterate the list directly instead of xrange(len(ls)): same length,
    # more idiomatic, and works on both Python 2 and 3 (no xrange on 3).
    return [theano.shared(np.array(0., dtype=theano.config.floatX))
            for _ in ls]
def merge_lists(ls_A, ls_B):
    """Pair up two equal-length lists into a single list of 2-tuples."""
    assert len(ls_A) == len(ls_B), 'two lists of different length'
    return [(a, b) for a, b in zip(ls_A, ls_B)]
def get_shared_values(shared_ls):
    """Read the current value of every shared variable in *shared_ls* and
    return them as one numpy array of dtype floatX."""
    values = [shared.get_value() for shared in shared_ls]
    return np.asarray(values, dtype=theano.config.floatX)
def is_shared_var(var):
    """True when *var* looks like a Theano/CUDA shared variable (matched by
    class name, so no theano import is needed here)."""
    return var.__class__.__name__ in ('TensorSharedVariable',
                                      'CudaNdarraySharedVariable')
def merge_var(*vars):
    """Flatten a mix of scalars, lists and tuples into one flat list,
    preserving argument order."""
    merged = []
    for item in vars:
        if isinstance(item, (list, tuple)):
            merged.extend(item)
        else:
            merged.append(item)
    return merged
| Python | 0.000018 | |
1768a69163c50e5e964eaf110323e590f13b4ff0 | add 0000 file | Drake-Z/0000/0000.py | Drake-Z/0000/0000.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'第 0000 题:将你的 QQ 头像(或者微博头像)右上角加上红色的数字,类似于微信未读信息数量那种提示效果。 类似于图中效果'
__author__ = 'Drake-Z'
from PIL import Image, ImageDraw, ImageFont
def add_num(filname, text = '4', fillcolor = (255, 0, 0)):
    """Draw *text* (a notification-badge style number) in the top-right
    corner of the image at *filname* and save the result as '1.jpg'.

    BUGFIX: the fillcolor argument was previously re-assigned to red inside
    the function, silently ignoring the caller's value; it is now honoured.
    Returns 0 on success.
    """
    img = Image.open(filname)
    width, height = img.size
    # Font size scales with the image so the badge stays proportional.
    myfont = ImageFont.truetype('C:/windows/fonts/Arial.ttf', size=width//8)
    draw = ImageDraw.Draw(img)
    draw.text((width-width//8, 0), text, font=myfont, fill=fillcolor)
    img.save('1.jpg','jpeg')
    return 0
if __name__ == '__main__':
    # Demo: stamp a red "4" onto 0.jpg (writes the result to 1.jpg).
    filname = '0.jpg'
    text = '4'
    fillcolor = (255, 0, 0)
    add_num(filname, text, fillcolor)
ebc2b419a3cc7cace9c79d1c5032a2ae33b8bff1 | Remove unused imports | custom/up_nrhm/reports/asha_reports.py | custom/up_nrhm/reports/asha_reports.py | from corehq.apps.reports.filters.select import MonthFilter, YearFilter
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import CustomProjectReport, DatespanMixin
from corehq.apps.reports.filters.dates import DatespanFilter
from custom.up_nrhm.filters import DrillDownOptionFilter, SampleFormatFilter
from custom.up_nrhm.reports.asha_facilitators_report import ASHAFacilitatorsReport
from custom.up_nrhm.reports.block_level_month_report import BlockLevelMonthReport
def total_rows(report):
    """Extra-context provider: expose the report's totals, but only for the
    default (no sample-format filter) report."""
    if report.report_config.get('sf'):
        return {}
    return {
        "total_under_facilitator": getattr(report, 'total_under_facilitator', 0),
        "total_with_checklist": getattr(report, 'total_with_checklist', 0),
    }
class ASHAReports(GenericTabularReport, DatespanMixin, CustomProjectReport):
    """Dispatcher report: delegates to the concrete ASHA report chosen by
    the sample-format (sf) filter."""
    fields = [SampleFormatFilter, DatespanFilter, DrillDownOptionFilter, MonthFilter, YearFilter]
    name = "ASHA Reports"
    slug = "asha_reports"
    show_all_rows = True
    default_rows = 20
    printable = True
    report_template_path = "up_nrhm/asha_report.html"
    extra_context_providers = [total_rows]
    no_value = '--'

    @property
    def report_config(self):
        # Only the sample-format selection is forwarded to sub-reports.
        config = {
            'sf': self.request.GET.get('sf'),
        }
        return config

    @property
    def report_context(self):
        context = super(ASHAReports, self).report_context
        context['sf'] = self.request.GET.get('sf')
        return context

    @property
    def model(self):
        """Return the delegate report for the selected sample format.

        sf4/sf5 are not implemented yet and render as empty reports.
        """
        config = self.report_config
        if config.get('sf') == 'sf5':
            return []
        elif config.get('sf') == 'sf4':
            return []
        elif config.get('sf') == 'sf3':
            return BlockLevelMonthReport(self.request, domain=self.domain)
        else:
            return ASHAFacilitatorsReport(self.request, domain=self.domain)

    @property
    def headers(self):
        return self.model.headers

    @property
    def rows(self):
        config = self.report_config
        if not config.get('sf'):
            # BUGFIX: total_with_checklist was previously unpacked into a
            # local, so total_rows() (which reads it off the report via
            # getattr) always saw 0.  Store it on self like the other total.
            rows, self.total_under_facilitator, self.total_with_checklist = self.model.rows
        else:
            rows = self.model.rows
        return rows
| import datetime
from dateutil.relativedelta import relativedelta
from corehq.apps.reports.filters.select import MonthFilter, YearFilter
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import CustomProjectReport, DatespanMixin
from corehq.apps.reports.filters.dates import DatespanFilter
from custom.up_nrhm.filters import DrillDownOptionFilter, SampleFormatFilter
from custom.up_nrhm.reports.asha_facilitators_report import ASHAFacilitatorsReport
from custom.up_nrhm.reports.block_level_af import BlockLevelAFReport
from custom.up_nrhm.reports.block_level_month_report import BlockLevelMonthReport
def total_rows(report):
    """Extra-context provider: totals are shown only for the default
    (no sample-format filter) report; other formats get an empty dict."""
    if not report.report_config.get('sf'):
        return {
            "total_under_facilitator": getattr(report, 'total_under_facilitator', 0),
            "total_with_checklist": getattr(report, 'total_with_checklist', 0)
        }
    return {}
class ASHAReports(GenericTabularReport, DatespanMixin, CustomProjectReport):
    """Dispatcher report: delegates to the concrete ASHA report chosen by
    the sample-format (sf) filter."""
    fields = [SampleFormatFilter, DatespanFilter, DrillDownOptionFilter, MonthFilter, YearFilter]
    name = "ASHA Reports"
    slug = "asha_reports"
    show_all_rows = True
    default_rows = 20
    printable = True
    report_template_path = "up_nrhm/asha_report.html"
    extra_context_providers = [total_rows]
    no_value = '--'

    @property
    def report_config(self):
        # Only the sample-format selection is forwarded to sub-reports.
        config = {
            'sf': self.request.GET.get('sf'),
        }
        return config

    @property
    def report_context(self):
        context = super(ASHAReports, self).report_context
        context['sf'] = self.request.GET.get('sf')
        return context

    @property
    def model(self):
        """Return the delegate report for the selected sample format
        (sf4/sf5 are not implemented and render as empty reports)."""
        config = self.report_config
        if config.get('sf') == 'sf5':
            return []
        elif config.get('sf') == 'sf4':
            return []
        elif config.get('sf') == 'sf3':
            return BlockLevelMonthReport(self.request, domain=self.domain)
        else:
            return ASHAFacilitatorsReport(self.request, domain=self.domain)

    @property
    def headers(self):
        return self.model.headers

    @property
    def rows(self):
        config = self.report_config
        if not config.get('sf'):
            # NOTE(review): total_with_checklist is unpacked into a local,
            # never stored on self, so total_rows() always reports 0 for
            # it — confirm whether it should be self.total_with_checklist.
            rows, self.total_under_facilitator, total_with_checklist = self.model.rows
        else:
            rows = self.model.rows
        return rows
| Python | 0.000001 |
3d8f02eb7c1b9b363143f25af9eadeb94c43b4ae | increase uwnetid maxlength | myuw/migrations/0017_netidlen.py | myuw/migrations/0017_netidlen.py | # Generated by Django 2.0.13 on 2020-03-12 17:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myuw', '0016_myuw_notice_group'),
]
operations = [
migrations.AlterField(
model_name='user',
name='uwnetid',
field=models.SlugField(max_length=32, unique=True),
),
]
| Python | 0.000015 | |
53a0e58bb68c3fb247a65fabf6c80b5bb41f440e | Fix custom attribute test factories | test/integration/ggrc/models/factories.py | test/integration/ggrc/models/factories.py | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Factories for models"""
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
import random
import factory
from ggrc import db
from ggrc import models
def random_string(prefix=''):
    """Return *prefix* followed by a random integer suffix, so parallel
    tests are unlikely to collide on unique columns."""
    suffix = random.randint(0, 9999999999)
    return '{}{}'.format(prefix, suffix)
class ModelFactory(factory.Factory):
    """Base factory that persists each created instance to the DB session."""

    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # factory_boy creation hook: build the model, then commit it
        # immediately so tests get a row with a real primary key.
        instance = target_class(*args, **kwargs)
        db.session.add(instance)
        db.session.commit()
        return instance


class TitledFactory(factory.Factory):
    # Random title so parallel tests never collide on unique titles.
    title = factory.LazyAttribute(lambda m: random_string('title'))
class CustomAttributeDefinitionFactory(ModelFactory, TitledFactory):
    """Factory for CA definitions; defaults to a free-form Text attribute."""

    class Meta:
        model = models.CustomAttributeDefinition

    definition_type = None
    definition_id = None
    attribute_type = "Text"
    multi_choice_options = None


class CustomAttributeValueFactory(ModelFactory):
    """Factory for CA values; callers must supply the attributable target."""

    class Meta:
        model = models.CustomAttributeValue

    custom_attribute = None
    attributable_id = None
    attributable_type = None
    attribute_value = None
    attribute_object_id = None
class DirectiveFactory(ModelFactory, TitledFactory):
    class Meta:
        model = models.Directive


class ControlFactory(ModelFactory, TitledFactory):
    """Control with an auto-created parent directive; all optional
    attributes default to None."""

    class Meta:
        model = models.Control

    directive = factory.SubFactory(DirectiveFactory)
    kind_id = None
    version = None
    documentation_description = None
    verify_frequency_id = None
    fraud_related = None
    key_control = None
    active = None
    notes = None


class AssessmentFactory(ModelFactory, TitledFactory):
    class Meta:
        model = models.Assessment


class ControlCategoryFactory(ModelFactory):
    class Meta:
        model = models.ControlCategory

    name = factory.LazyAttribute(lambda m: random_string('name'))
    lft = None
    rgt = None
    scope_id = None
    depth = None
    required = None


class CategorizationFactory(ModelFactory):
    class Meta:
        model = models.Categorization

    category = None
    categorizable = None
    category_id = None
    categorizable_id = None
    categorizable_type = None


class ContextFactory(ModelFactory):
    class Meta:
        model = models.Context

    name = factory.LazyAttribute(
        lambda obj: random_string("SomeObjectType Context"))
    related_object = None


class ProgramFactory(ModelFactory):
    class Meta:
        model = models.Program

    title = factory.LazyAttribute(lambda _: random_string("program_title"))
    slug = factory.LazyAttribute(lambda _: random_string(""))
class AuditFactory(ModelFactory):
    """Audit in 'Planned' state; creates a fresh program and context."""

    class Meta:
        model = models.Audit

    title = factory.LazyAttribute(lambda _: random_string("audit title "))
    slug = factory.LazyAttribute(lambda _: random_string(""))
    status = "Planned"
    program_id = factory.LazyAttribute(lambda _: ProgramFactory().id)
    context_id = factory.LazyAttribute(lambda _: ContextFactory().id)


class AssessmentTemplateFactory(ModelFactory):
    """Assessment template with Object Owners as default assessors/verifiers."""

    class Meta:
        model = models.AssessmentTemplate

    title = factory.LazyAttribute(
        lambda _: random_string("assessment template title"))
    template_object_type = None
    test_plan_procedure = False
    procedure_description = factory.LazyAttribute(
        lambda _: random_string("lorem ipsum description"))
    # JSON-encoded default assignees, stored as a string column.
    default_people = \
        "{\"assessors\":\"Object Owners\",\"verifiers\":\"Object Owners\"}"
class ContractFactory(ModelFactory):
    class Meta:
        model = models.Contract


class EventFactory(ModelFactory):
    class Meta:
        model = models.Event

    # NOTE(review): a mutable class-level default; factory_boy passes it as
    # the declared value for each build — confirm instances never mutate it.
    revisions = []


class RelationshipFactory(ModelFactory):
    class Meta:
        model = models.Relationship

    source = None
    destination = None


class RelationshipAttrFactory(ModelFactory):
    class Meta:
        model = models.RelationshipAttr

    relationship_id = None
    attr_name = None
    attr_value = None


class PersonFactory(ModelFactory):
    class Meta:
        model = models.Person
| # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Factories for models"""
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
import random
import factory
from ggrc import db
from ggrc import models
def random_string(prefix=''):
    """Return *prefix* followed by a random integer suffix, so parallel
    tests are unlikely to collide on unique columns."""
    return '{prefix}{suffix}'.format(
        prefix=prefix,
        suffix=random.randint(0, 9999999999),
    )


class ModelFactory(factory.Factory):
    """Base factory that persists each created instance to the DB session."""

    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # factory_boy creation hook: build the model, then commit it
        # immediately so tests get a row with a real primary key.
        instance = target_class(*args, **kwargs)
        db.session.add(instance)
        db.session.commit()
        return instance


class TitledFactory(factory.Factory):
    # Random title so parallel tests never collide on unique titles.
    title = factory.LazyAttribute(lambda m: random_string('title'))
class CustomAttributeDefinitionFactory(ModelFactory):
    """Factory for CA definitions; every attribute must be supplied by the
    caller (all defaults are None, including the title)."""

    class Meta:
        model = models.CustomAttributeDefinition

    title = None
    definition_type = None
    definition_id = None
    attribute_type = None
    multi_choice_options = None


class CustomAttributeValueFactory(ModelFactory):
    """Factory for CA values; callers must supply the attributable target."""

    class Meta:
        model = models.CustomAttributeValue

    custom_attribute_id = None
    attributable_id = None
    attributable_type = None
    attribute_value = None
    attribute_object_id = None
class DirectiveFactory(ModelFactory, TitledFactory):
    class Meta:
        model = models.Directive


class ControlFactory(ModelFactory, TitledFactory):
    """Control with an auto-created parent directive; all optional
    attributes default to None."""

    class Meta:
        model = models.Control

    directive = factory.SubFactory(DirectiveFactory)
    kind_id = None
    version = None
    documentation_description = None
    verify_frequency_id = None
    fraud_related = None
    key_control = None
    active = None
    notes = None


class AssessmentFactory(ModelFactory, TitledFactory):
    class Meta:
        model = models.Assessment


class ControlCategoryFactory(ModelFactory):
    class Meta:
        model = models.ControlCategory

    name = factory.LazyAttribute(lambda m: random_string('name'))
    lft = None
    rgt = None
    scope_id = None
    depth = None
    required = None


class CategorizationFactory(ModelFactory):
    class Meta:
        model = models.Categorization

    category = None
    categorizable = None
    category_id = None
    categorizable_id = None
    categorizable_type = None


class ContextFactory(ModelFactory):
    class Meta:
        model = models.Context

    name = factory.LazyAttribute(
        lambda obj: random_string("SomeObjectType Context"))
    related_object = None


class ProgramFactory(ModelFactory):
    class Meta:
        model = models.Program

    title = factory.LazyAttribute(lambda _: random_string("program_title"))
    slug = factory.LazyAttribute(lambda _: random_string(""))
class AuditFactory(ModelFactory):
    """Audit in 'Planned' state; creates a fresh program and context."""

    class Meta:
        model = models.Audit

    title = factory.LazyAttribute(lambda _: random_string("audit title "))
    slug = factory.LazyAttribute(lambda _: random_string(""))
    status = "Planned"
    program_id = factory.LazyAttribute(lambda _: ProgramFactory().id)
    context_id = factory.LazyAttribute(lambda _: ContextFactory().id)


class AssessmentTemplateFactory(ModelFactory):
    """Assessment template with Object Owners as default assessors/verifiers."""

    class Meta:
        model = models.AssessmentTemplate

    title = factory.LazyAttribute(
        lambda _: random_string("assessment template title"))
    template_object_type = None
    test_plan_procedure = False
    procedure_description = factory.LazyAttribute(
        lambda _: random_string("lorem ipsum description"))
    # JSON-encoded default assignees, stored as a string column.
    default_people = \
        "{\"assessors\":\"Object Owners\",\"verifiers\":\"Object Owners\"}"
class ContractFactory(ModelFactory):
    class Meta:
        model = models.Contract


class EventFactory(ModelFactory):
    class Meta:
        model = models.Event

    # NOTE(review): a mutable class-level default; factory_boy passes it as
    # the declared value for each build — confirm instances never mutate it.
    revisions = []


class RelationshipFactory(ModelFactory):
    class Meta:
        model = models.Relationship

    source = None
    destination = None


class RelationshipAttrFactory(ModelFactory):
    class Meta:
        model = models.RelationshipAttr

    relationship_id = None
    attr_name = None
    attr_value = None


class PersonFactory(ModelFactory):
    class Meta:
        model = models.Person
| Python | 0.000001 |
1e7b84155623691fb9fc1cec4efa6386938f3e72 | Add missing migration (updating validators=) | core/migrations/0055_update_username_validators.py | core/migrations/0055_update_username_validators.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-22 22:03
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0054_add_provider__cloud_config_and_timezone'),
]
operations = [
migrations.AlterField(
model_name='atmosphereuser',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
),
]
| Python | 0 | |
48217e5317412a9b5fb8181b6915963783efeaf2 | Add test for kline result of exact amount | tests/test_historical_klines.py | tests/test_historical_klines.py | #!/usr/bin/env python
# coding=utf-8
from binance.client import Client
import pytest
import requests_mock
client = Client('api_key', 'api_secret')
def test_exact_amount():
"""Test Exact amount returned"""
first_res = []
row = [1519892340000,"0.00099400","0.00099810","0.00099400","0.00099810","4806.04000000",1519892399999,"4.78553253",154,"1785.14000000","1.77837524","0"]
for i in range(0, 500):
first_res.append(row)
second_res = []
with requests_mock.mock() as m:
m.get('https://api.binance.com/api/v1/klines?interval=1m&limit=500&startTime=1519862400000&symbol=BNBBTC', json=first_res)
m.get('https://api.binance.com/api/v1/klines?interval=1m&limit=500&startTime=1519892400000&symbol=BNBBTC', json=second_res)
client.get_historical_klines(
symbol="BNBBTC",
interval=Client.KLINE_INTERVAL_1MINUTE,
start_str="1st March 2018"
)
| Python | 0 | |
b1069d90130070c1fb5f35ba2d455bead235f8b2 | add views.py of django | django/views.py | django/views.py | from django.shortcuts import render
from django.db.models import Q
from django.shortcuts import render_to_response
from search.engine.search import dosearch
from django.core.paginator import Paginator,EmptyPage, PageNotAnInteger
import string
import sys
from search.engine.query_process import query_parser_no_stopwords,lower_letters
from search.engine.spelling_correct import correctch
import os
import json
import codecs
import pickle
module_dir = os.path.dirname(__file__)
file_path = os.path.join(module_dir, r'engine/data/')
f_path_color = file_path +"color.dat"
f = open(f_path_color)
color=pickle.load(f)
f.close()
f_path_suggest = file_path +"autocomplete.dat"
f = open(f_path_suggest)
autocomplete_list = pickle.load(f)
f.close()
def is_eng_word(word):
val = True
include = set(string.letters+string.digits)
for ch in word:
if ch not in include:
val = False
break
return val
def tag_filter(word_list):
exclude = set(string.punctuation+u" ")
res_list = []
for word in word_list:
word = ''.join(ch for ch in word if ch not in exclude)
if is_eng_word(word) is False:
if len(word)<5 and len(word)>1:
res_list.append(word)
else:
if len(word)<8 and len(word)>1:
res_list.append(word)
if u" " in res_list:
res_list.remove(u" ")
return res_list
def search(request):
query = request.GET.get('q', '')
sensitive_words =[u"\u4e60\u8fd1\u5e73",u"\u674e\u514b\u5f3a",u"\u80e1\u9526\u6d9b",u"\u6e29\u5bb6\u5b9d"]
chn = 1
if query in sensitive_words:
chn = 0
res = dosearch(query)
terms = query_parser_no_stopwords(query)
if query not in terms:
terms.append(query)
sug_flag = 0
sug_query_list=[]
sug_str=""
query_cor = correctch(lower_letters(query))
if query_cor==lower_letters(query):
for t in terms:
if t != query:
t_cor=correctch(t)
if t_cor!=t:
word_flag = 1
sug_flag = 1
else:
word_flag = 0
sug_query_list.append((word_flag,t_cor))
sug_str = sug_str + t_cor
if is_eng_word(t_cor):
sug_str = sug_str+u" "
else:
sug_flag = 1
sug_query_list.append((1,query_cor))
sug_str = query_cor
album = []
singer = []
play_count_num = []
title = []
share_count = []
lrc = []
title_color={}
singer_color={}
result = []
if res:
for i in res:
path = file_path + i+'.json'
f = codecs.open(path.decode('utf-8'),'r')
j = json.load(f)
f.close()
tag_list =tag_filter(j["tag"])
r = int(color[j["title"]][1:3],16)
g = int(color[j["title"]][3:5],16)
b = int(color[j["title"]][5:7],16)
L = 0.2126*(float(r)/255)**2.2+0.7152*(float(g)/255)**2.2+0.0722*(float(b)/255)**2.2
if L > 0.5:
title_color[j["title"]]="#000000"
singer_color[j["title"]] = "#444444"
else:
title_color[j["title"]]="#ffffff"
singer_color[j["title"]]="#dddddd"
if len(tag_list)>=3:
result.append((j["title"],j["singer"],j["album"],j["ID"],color[j["title"]],title_color[j["title"]],singer_color[j["title"]],j["play_count_num"],tag_list[0],tag_list[1],tag_list[2]))
elif len(tag_list)==2:
result.append((j["title"],j["singer"],j["album"],j["ID"],color[j["title"]],title_color[j["title"]],singer_color[j["title"]],j["play_count_num"],tag_list[0],tag_list[1]))
elif len(tag_list)==1:
result.append((j["title"],j["singer"],j["album"],j["ID"],color[j["title"]],title_color[j["title"]],singer_color[j["title"]],j["play_count_num"],tag_list[0]))
else :
result.append((j["title"],j["singer"],j["album"],j["ID"],color[j["title"]],title_color[j["title"]],singer_color[j["title"]],j["play_count_num"]))
else:
res=[]
result=[]
paginator = Paginator(result,10)
page = request.GET.get('page')
try:
results = paginator.page(page)
except PageNotAnInteger:
results = paginator.page(1)
except EmptyPage:
results = paginator.page(paginator.num_pages)
return render_to_response("search.html", {
"query": query,
"res": results,
"sug_flag":sug_flag,
"sug_query_list":sug_query_list,
"sug_str":sug_str,
"autocomplete_list":autocomplete_list,
"chn":chn,
})
| Python | 0 | |
1f3a15b8ae6ffcb96faaf0acab940d9590fe6cb1 | Add migration | fat/migrations/0064_auto_20160809_1559.py | fat/migrations/0064_auto_20160809_1559.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-09 15:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fat', '0063_auto_20160809_1545'),
]
operations = [
migrations.AlterField(
model_name='expense',
name='status',
field=models.CharField(choices=[('W', 'Not submitted yet'), ('S', 'Submitted (awaiting processing)'), ('C', 'Administrator checking'), ('P', 'Authoriser checking'), ('A', 'Approved (submitted to finance)'), ('F', 'Finished')], default='P', max_length=1),
),
]
| Python | 0.000002 | |
5ec3f8dbe9f044d08a80563c05b648590fabdda7 | add fibonnaci example | examples/fib.py | examples/fib.py | # / 0 if i is 0
# fib(i) = | 1 if i is 1
# \ fib(i - 1) + fib(i - 2) otherwise
def fib(n):
""" Imperative definition of Fibonacci numbers """
a, b = 0, 1
for i in range(n):
a, b = b, a + b
return b
# This is intuitive but VERY slow
def fib(n):
""" Functional definition of Fibonacci numbers """
if n == 0 or n == 1:
return n
else:
return fib(n - 1) + fib(n - 2)
from toolz import memoize
# Oh wait, it's fast again
fib = memoize(fib)
| Python | 0.999895 | |
c663f6b6e31832fae682c2c527955b13682b701e | Remove learner_testimonials column from course_metadata course run table | course_discovery/apps/course_metadata/migrations/0127_remove_courserun_learner_testimonials.py | course_discovery/apps/course_metadata/migrations/0127_remove_courserun_learner_testimonials.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-07 17:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0126_course_has_ofac_restrictions'),
]
operations = [
migrations.RemoveField(
model_name='courserun',
name='learner_testimonials',
),
]
| Python | 0.000002 | |
8b1bd5995ff4c95335e25e19962724e6d8c399d7 | Create 0003_auto_20150930_1132.py | cities/migrations/0003_auto_20150930_1132.py | cities/migrations/0003_auto_20150930_1132.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cities', '0002_auto_20150811_1912'),
]
operations = [
migrations.AddField(
model_name='city',
name='name_de',
field=models.CharField(db_index=True, verbose_name='ascii name', null=True, max_length=200),
),
migrations.AddField(
model_name='city',
name='name_en',
field=models.CharField(db_index=True, verbose_name='ascii name', null=True, max_length=200),
),
migrations.AddField(
model_name='country',
name='name_de',
field=models.CharField(db_index=True, verbose_name='ascii name', null=True, max_length=200),
),
migrations.AddField(
model_name='country',
name='name_en',
field=models.CharField(db_index=True, verbose_name='ascii name', null=True, max_length=200),
),
]
| Python | 0.000016 | |
b75e10f3235e9215458071279b67910627a95180 | Add celery based job runner | ceam/framework/celery_tasks.py | ceam/framework/celery_tasks.py | import os
from time import time
import logging
import pandas as pd
from celery import Celery
from billiard import current_process
app = Celery()
@app.task(autoretry_for=(Exception,), max_retries=2)
def worker(draw_number, component_config, branch_config, logging_directory):
worker = current_process().index
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', filename=os.path.join(logging_directory, str(worker)+'.log'), level=logging.DEBUG)
logging.info('Starting job: {}'.format((draw_number, component_config, branch_config)))
run_configuration = component_config['configuration'].get('run_configuration', {})
results_directory = run_configuration['results_directory']
run_configuration['run_id'] = str(worker)+'_'+str(time())
if branch_config is not None:
run_configuration['run_key'] = dict(branch_config)
run_configuration['run_key']['draw'] = draw_number
component_config['configuration']['run_configuration'] = run_configuration
try:
from ceam.framework.engine import configure, run
from ceam.framework.components import prepare_component_configuration
from ceam.framework.util import collapse_nested_dict
configure(draw_number=draw_number, simulation_config=branch_config)
results = run(prepare_component_configuration(component_config))
results = pd.DataFrame(results, index=[draw_number]).to_json()
return results
except Exception as e:
logging.exception('Unhandled exception in worker')
raise
finally:
logging.info('Exiting job: {}'.format((draw_number, component_config, branch_config)))
| Python | 0.000011 | |
164f43f902b89b84b4f0d474f4d3e0a18924110d | Add test of randomized select algorithm | selection_test.py | selection_test.py | import quicksort.quicksort
import random_selection.random_selection
import sys
import time
from random import randint
def main(max_len, check):
for n in [2**(n+1) for n in range(max_len)]:
arr = [randint(0, 2**max_len) for n in range(n)]
median = int((len(arr)+1)/2) - 1
current_time = time.time()
result = random_selection.random_selection.select(arr, median)
end_time = time.time() - current_time
sorted_arr = quicksort.quicksort.sort(arr)
if sorted_arr[median] == result:
print "Success! In %f" % end_time
else:
print "Failed"
return
if __name__ == '__main__':
arr_len = int(sys.argv[1])
main(arr_len) | Python | 0.000005 | |
80651fc7dba6a390091dc0f0908ec165cf33c0bb | make diagnostic plots for a star | scripts/plot_star.py | scripts/plot_star.py | """ Make diagnostic plots for a specified APOGEE ID """
# Standard library
from os import path
# Third-party
import h5py
import matplotlib.pyplot as plt
from sqlalchemy.orm.exc import NoResultFound
# Project
from twoface.log import log as logger
from twoface.db import db_connect
from twoface.db import (JokerRun, AllStar, AllVisit, StarResult, Status,
AllVisitToAllStar, RedClump, CaoVelocity)
from twoface.config import TWOFACE_CACHE_PATH
from twoface.io import load_samples
from twoface.plot import plot_data_orbits
def main(database_file, apogee_id, joker_run, cao):
db_path = path.join(TWOFACE_CACHE_PATH, database_file)
if not path.exists(db_path):
raise IOError("sqlite database not found at '{0}'\n Did you run "
"scripts/initdb.py yet for that database?"
.format(db_path))
logger.debug("Connecting to sqlite database at '{0}'".format(db_path))
Session, engine = db_connect(database_path=db_path,
ensure_db_exists=False)
session = Session()
# Get The Joker run information
run = session.query(JokerRun).filter(JokerRun.name == joker_run).one()
try:
star = session.query(AllStar).join(StarResult, JokerRun)\
.filter(AllStar.apogee_id == apogee_id)\
.filter(JokerRun.name == joker_run)\
.one()
except NoResultFound:
raise NoResultFound("Star {0} has no results in Joker run {1}."
.format(apogee_id, joker_run))
# get the RV data for this star
data = star.apogeervdata(cao=cao)
# load posterior samples from The Joker
samples_dict = load_samples(path.join(TWOFACE_CACHE_PATH,
'{0}.hdf5'.format(run.name)),
apogee_id)
# Plot the data with orbits on top
fig = plot_data_orbits(data, samples_dict, jitter=run.jitter,
xlim_choice='wide', title=star.apogee_id)
fig.set_tight_layout(True)
fig = plot_data_orbits(data, samples_dict, jitter=run.jitter,
xlim_choice='tight', title=star.apogee_id)
fig.set_tight_layout(True)
# TODO:
session.close()
plt.show()
if __name__ == "__main__":
from argparse import ArgumentParser
import logging
# Define parser object
parser = ArgumentParser(description="")
vq_group = parser.add_mutually_exclusive_group()
vq_group.add_argument('-v', '--verbose', action='count', default=0,
dest='verbosity')
vq_group.add_argument('-q', '--quiet', action='count', default=0,
dest='quietness')
# Required:
parser.add_argument("-a", "--apogeeid", dest="apogee_id",
required=True, type=str,
help="The APOGEE ID to visualize.")
parser.add_argument("-j", "--jokerrun", dest="joker_run",
required=True, type=str,
help="The Joker run name to load results from.")
# Optional:
parser.add_argument("-d", "--dbfile", dest="database_file",
default="apogee.sqlite", type=str,
help="Path to the database file.")
parser.add_argument("--cao", dest="cao_velocities", default=False,
action="store_true",
help="Plot the Cao velocities instead of APOGEE "
"radial velocities.")
args = parser.parse_args()
# Set logger level based on verbose flags
if args.verbosity != 0:
if args.verbosity == 1:
logger.setLevel(logging.DEBUG)
else: # anything >= 2
logger.setLevel(1)
elif args.quietness != 0:
if args.quietness == 1:
logger.setLevel(logging.WARNING)
else: # anything >= 2
logger.setLevel(logging.ERROR)
else: # default
logger.setLevel(logging.INFO)
main(apogee_id=args.apogee_id, database_file=args.database_file,
joker_run=args.joker_run, cao=args.cao_velocities)
| Python | 0.000003 | |
b528956e9394dc56951c2fb0894fefd7ee6872ff | Create cnn_evaluation.py | Convolutional_Neural_Network/cnn_evaluation.py | Convolutional_Neural_Network/cnn_evaluation.py | """ Using an Convolutional Nural Network on MNIST handwritten digits, and evaluating its performance with different scores
References:
Tflearn.org/examples
Tensorflow.org
Links:
[MNIST Dataset] http://yann.lecun.com/exdb/mnist/
Method and Examples Used:
[1] An simple example from Tflean, which is an higher level API for tensorflow provided with an autoencoder example which reconstructed the
images but the motive here was to evaluate this autoencoder with different score so it could be fine tuned in future for various specific tasks.
Also for reconstructing the images this program used decoder which we don't need for our evaluation.
[2] Secondly the last layer for classification should be softmax layer and here I changed here acoordingly
[3] I am not using Confusion matrix from tensorflow, rather I used sklearn library for that purpose.
[4] All the steps involved in this program is commented out for better understanding of this program.
"""
from __future__ import division, print_function, absolute_import
import numpy
import tflearn
import tensorflow as tf
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from random import randint
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
Images, Lables, testImages, testLables = mnist.load_data(one_hot=True)
Images = Images.reshape([-1, 28, 28, 1])
testImages = testImages.reshape([-1, 28, 28, 1])
f = randint(0,20)
# Building convolutional network
network = input_data(shape=[None, 28, 28, 1], name='input')
network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = fully_connected(network, 128, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 10, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.01,
loss='categorical_crossentropy', name='target')
# Training
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit({'input': Images}, {'target':Lables}, n_epoch=1,
validation_set=({'input': testImages}, {'target': testLables}),
snapshot_step=100, show_metric=True, run_id='Convolution_Neural_Network')
# Here I evaluate the model with Test Images and Test Lables, calculating the Mean Accuracy of the model.
evaluation= model.evaluate(testImages,testLables)
print("\n")
print("\t"+"Mean accuracy of the model is :", evaluation)
# Prediction the Lables of the Images that we give to the model just to have a clear picture of Neural Netwok
lables = model.predict_label(testImages)
print("\n")
print("\t"+"The predicted labels are :",lables)
# Predicted probailites
y = model.predict(testImages)
print("\n")
print("\t"+"\t"+"\t"+"The predicted probabilities are :" )
print("\n")
print (y[f])
# Running a session to feed calculate the confusion matrix
sess = tf.Session()
# taking the argumented maximum of the predicted probabilities for generating confusion matrix
prediction = tf.argmax(y,1)
# displaying length of predictions and evaluating them in a session
with sess.as_default():
print (len(prediction.eval()))
predicted_labels = prediction.eval()
# Again importing the mnist data with one hot as false because we need to know the truepositive and other values for evaluation
Images, Lables, testImages, targetLables = mnist.load_data(one_hot=False)
# Used Sklearn library for evaluation as tensorflows library was not documented properly
# Generated the Confusion Matrix
confusionMatrix = confusion_matrix(targetLables, predicted_labels)
print("\n"+"\t"+"The confusion Matrix is ")
print ("\n",confusionMatrix)
# Classification_report in Sklearn provide all the necessary scores needed to succesfully evaluate the model.
classification = classification_report(targetLables,predicted_labels, digits=4,
target_names =['class 0','class 1','class 2','class 3','class 4','class 5','class 6','class 7','class 8','class 9'])
print("\n"+"\t"+"The classification report is ")
print ("\n",classification)
| Python | 0.000002 | |
9168807db69372ffb93430991fc4e666fa53a8f5 | Add missing example file | examples/movemean.py | examples/movemean.py | """
A moving average function using @guvectorize.
"""
import numpy as np
from numba import guvectorize
@guvectorize(['void(float64[:], intp[:], float64[:])'], '(n),()->(n)')
def move_mean(a, window_arr, out):
window_width = window_arr[0]
asum = 0.0
count = 0
for i in range(window_width):
asum += a[i]
count += 1
out[i] = asum / count
for i in range(window_width, len(a)):
asum += a[i] - a[i - window_width]
out[i] = asum / count
arr = np.arange(20, dtype=np.float64).reshape(2, 10)
print(arr)
print(move_mean(arr, 3))
| Python | 0.000005 | |
82d34111295fdfa35d0e9815053498e935d415af | Add example script to store & read datetime | examples/store_datetimes.py | examples/store_datetimes.py | import h5py
import numpy as np
arr = np.array([np.datetime64('2019-09-22T17:38:30')])
with h5py.File('datetimes.h5', 'w') as f:
# Create dataset
f['data'] = arr.astype(h5py.opaque_dtype(arr.dtype))
# Read
print(f['data'][:])
| Python | 0 | |
e581eb8af860456b0ff46e99398002b3df0f0677 | add Julia magic for IPython | julia/magic.py | julia/magic.py | """
==========================
Julia magics for IPython
==========================
{JULIAMAGICS_DOC}
Usage
=====
``%%julia``
{JULIA_DOC}
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import sys
from IPython.core.magic import Magics, magics_class, line_cell_magic
from julia import Julia
#-----------------------------------------------------------------------------
# Main classes
#-----------------------------------------------------------------------------
@magics_class
class JuliaMagics(Magics):
"""A set of magics useful for interactive work with Julia.
"""
def __init__(self, shell):
"""
Parameters
----------
shell : IPython shell
"""
super(JuliaMagics, self).__init__(shell)
print("Initializing Julia interpreter. This may take some time...",
end='')
# Flush, otherwise the Julia startup will keep stdout buffered
sys.stdout.flush()
self.julia = Julia(init_julia=True)
print()
@line_cell_magic
def julia(self, line, cell=None):
"""
Execute code in Julia, and pull some of the results back into the
Python namespace.
"""
src = str(line if cell is None else cell)
return self.julia.eval(src)
# Add to the global docstring the class information.
__doc__ = __doc__.format(
JULIAMAGICS_DOC = ' '*8 + JuliaMagics.__doc__,
JULIA_DOC = ' '*8 + JuliaMagics.julia.__doc__,
)
#-----------------------------------------------------------------------------
# IPython registration entry point.
#-----------------------------------------------------------------------------
def load_ipython_extension(ip):
"""Load the extension in IPython."""
ip.register_magics(JuliaMagics)
| Python | 0.000406 | |
e830ce7115ea417feb00c62bf68a7d1829815630 | Create UAV_State class | scripts/uav_state.py | scripts/uav_state.py | #!/usr/bin/env python
#
# UAV State Model:
# Encapsulates UAV state and abstracts communication
# States:
# - Setpoint pose
# - local_position
# - MAV mode
# - arm
# import ROS libraries
import rospy
import mavros
from mavros.utils import *
from mavros import setpoint as SP
import mavros_msgs.msg
import mavros_msgs.srv
#
import time
from datetime import datetime
import enum
class AutoNumber(enum.Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
class MODE(AutoNumber):
MANUAL = ()
RTL = ()
class ARM(AutoNumber):
ARMED = ()
DISARMED = ()
#
class _coord:
def __init__(self):
self.x = 0
self.y = 0
self.z = 0
#
class UAV_State:
def __init__(self):
self.current_pose = _coord()
self.setpoint_pose = _coord()
self.mode = "None"
self.arm = "None"
self.guided = "None"
self.timestamp = float(datetime.utcnow().strftime('%S.%f'))
self.connection_delay = 0.0
mavros.set_namespace("/mavros")
# Subscribers
self.local_position_sub = rospy.Subscriber(mavros.get_topic('local_position', 'pose'),
SP.PoseStamped, self.__local_position_cb)
self.setpoint_local_sub = rospy.Subscriber(mavros.get_topic('setpoint_raw', 'target_local'),
mavros_msgs.msg.PositionTarget, self.__setpoint_position_cb)
self.state_sub = rospy.Subscriber(mavros.get_topic('state'),
mavros_msgs.msg.State, self.__state_cb)
pass
def __local_position_cb(self, topic):
self.current_pose.x = topic.pose.position.x
self.current_pose.y = topic.pose.position.y
self.current_pose.z = topic.pose.position.z
def __setpoint_position_cb(self, topic):
self.setpoint_pose.x = topic.position.x
self.setpoint_pose.y = topic.position.y
self.setpoint_pose.z = topic.position.z
def __state_cb(self, topic):
self.__calculate_delay()
self.mode = topic.mode
self.guided = topic.guided
self.arm = topic.armed
def __calculate_delay(self):
tmp = float(datetime.utcnow().strftime('%S.%f'))
if tmp<self.timestamp:
# over a minute
self.connection_delay = 60.0 - self.timestamp + tmp
else:
self.connection_delay = tmp - self.timestamp
self.timestamp = tmp
####
def get_mode(self):
return self.mode
def set_mode(self, new_mode):
rospy.wait_for_service('/mavros/set_mode')
try:
flightModeService = rospy.ServiceProxy('/mavros/set_mode', mavros_msgs.srv.SetMode)
isModeChanged = flightModeService(custom_mode=new_mode)
except rospy.ServiceException, e:
rospy.loginfo("Service set_mode call failed: %s. Mode %s could not be set. Check that GPS is enabled.",e,new_mode)
####
def get_arm(self):
return self.arm
def set_arm(self, new_arm):
rospy.wait_for_service('/mavros/cmd/arming')
try:
armService = rospy.ServiceProxy('/mavros/cmd/arming', mavros_msgs.srv.CommandBool)
armService(new_arm)
except rospy.ServiceException, e:
rospy.loginfo("Service arm call failed: %s. Attempted to set %s",e,new_arm)
####
def get_current_pose(self):
return self.current_pose
def get_setpoint_pose(self):
return self.setpoint_pose
def get_guided(self):
return self.guided
def get_delay(self):
return self.connection_delay
| Python | 0 | |
12691d47c4dbbaac42d2c9a8fe04e70cb5a94e98 | add Yaspin.write usage example | examples/write_method.py | examples/write_method.py | # -*- coding: utf-8 -*-
"""
examples.write_method
~~~~~~~~~~~~~~~~~~~~~
Basic usage of ``write`` method.
"""
import time
from yaspin import yaspin
def main():
with yaspin(text='Downloading images') as sp:
# task 1
time.sleep(1)
sp.write('> image 1 download complete')
# task 2
time.sleep(2)
sp.write('> image 2 download complete')
# finalize
sp.ok()
if __name__ == '__main__':
main()
| Python | 0 | |
324bc6f72deef0349f0da48366ab11b749a231b5 | Make AzureKeyVaultBackend backwards-compatible (#12626) | airflow/contrib/secrets/azure_key_vault.py | airflow/contrib/secrets/azure_key_vault.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.microsoft.azure.secrets.azure_key_vault`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.microsoft.azure.secrets.azure_key_vault import AzureKeyVaultBackend # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.microsoft.azure.secrets.azure_key_vault`.",
DeprecationWarning,
stacklevel=2,
)
| Python | 0 | |
165d6795c2e3b173282736127c092ede57ae8f55 | Create create_recurring_for_failed.py | erpnext/patches/v6_27/create_recurring_for_failed.py | erpnext/patches/v6_27/create_recurring_for_failed.py | import frappe
from erpnext.controllers.recurring_document import manage_recurring_documents
def execute():
frappe.db.sql("""update `tabSales Invoice`
set is_recurring=1 where (docstatus=1 or docstatus=0) and next_date='2016-06-26' and is_recurring=0""")
manage_recurring_documents("Sales Invoice", "2016-06-26")
| Python | 0.000004 | |
93d1d4cc446cd13affaf1b467e39845c5dc437a5 | Add missing migration | events/migrations/0002_auto_20150119_2138.py | events/migrations/0002_auto_20150119_2138.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='offer',
name='price',
field=models.CharField(max_length=512),
preserve_default=True,
),
migrations.AlterField(
model_name='offer',
name='price_en',
field=models.CharField(null=True, max_length=512),
preserve_default=True,
),
migrations.AlterField(
model_name='offer',
name='price_fi',
field=models.CharField(null=True, max_length=512),
preserve_default=True,
),
migrations.AlterField(
model_name='offer',
name='price_sv',
field=models.CharField(null=True, max_length=512),
preserve_default=True,
),
]
| Python | 0.0002 | |
a8b46224dfda38173ea130d820411aad6a47acfc | Add Commander.py | src/Commander.py | src/Commander.py | # Copyright (c) 2013 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
from Bot import Bot
from time import strftime
import logging
def main():
print ("Starting GorillaBot.\n")
desc = "This is the command-line utility for setting up and running GorillaBot, "
"a simple IRC bot."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("-default", action="store_true")
logger = logging.getLogger("GB")
logger.info("LOG!")
GorillaBot = Bot()
parser.parse_args()
if __name__ == "__main__":
main()
| Python | 0.000004 | |
52c9a8ab10934c7acf8bcc404dccd2524199acb7 | support for qualifying keys with dot('.') in JSON reference | src/DictUtils.py | src/DictUtils.py | import collections
class DictUtils:
@staticmethod
def __retrieveFromDict(t, key):
if None != t:
found = True
if str == type(key):
keys = key.split('.')
else:
keys = key
for k in keys:
if k in t:
t = t[k]
else:
found = False
break
if found:
return t
return None
@staticmethod
def defaultIfNone(theDict, defaultDict, key):
if None == key:
return None
val = DictUtils.__retrieveFromDict(theDict, key)
if None != val:
return val
return DictUtils.__retrieveFromDict(defaultDict, key)
@staticmethod
def convert(data):
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(DictUtils.convert, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(DictUtils.convert, data))
else:
return data | import collections
class DictUtils:
@staticmethod
def __retrieveFromDict(t, key):
if None != t:
found = True
if str == type(key):
keys = [key]
else:
keys = key
for k in keys:
if k in t:
t = t[k]
else:
found = False
break
if found:
return t
return None
@staticmethod
def defaultIfNone(theDict, defaultDict, key):
if None == key:
return None
val = DictUtils.__retrieveFromDict(theDict, key)
if None != val:
return val
return DictUtils.__retrieveFromDict(defaultDict, key)
@staticmethod
def convert(data):
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(DictUtils.convert, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(DictUtils.convert, data))
else:
return data | Python | 0.000072 |
fe36fd79c1981c489fd1db548c7468acbf98fff5 | add test for s3 filename unquote | app/backend/gwells/tests/test_documents.py | app/backend/gwells/tests/test_documents.py | from django.test import TestCase
from gwells.documents import MinioClient
class DocumentsTestCase(TestCase):
    """Tests for MinioClient.create_url handling of quoted S3 object names."""

    def test_document_url_with_space(self):
        # A literal space in the object name must survive URL construction.
        minio_client = MinioClient(disable_private=True)
        test_document = {
            "bucket_name": "test_bucket",
            "object_name": "test key"
        }
        test_url = minio_client.create_url(test_document, "example.com", test_document.get("bucket_name"))
        self.assertEqual(test_url, "https://example.com/test_bucket/test key")

    def test_document_url_with_plus(self):
        # A '+' coming from an S3 listing is an encoded space and must be
        # decoded back to ' ' in the generated URL.
        minio_client = MinioClient(disable_private=True)
        test_document = {
            "bucket_name": "test_bucket",
            # if this was a real plus in the filename it should be %2B in the listing.
            # spaces get encoded into + (so in this test case, this object_name originally had a space).
            "object_name": "test+key"
        }
        test_url = minio_client.create_url(test_document, "example.com", test_document.get("bucket_name"))
        self.assertEqual(test_url, "https://example.com/test_bucket/test key")
| Python | 0.000001 | |
510a3ddd61fb5c6599b85650c09a0f1b799f06b2 | add 'fw11.py' which supports urlpath parameter | framework_python/fw11.py | framework_python/fw11.py | # -*- coding: utf-8 -*-
##
## Support URL path parameters
##
import os
import re
import json
class Request(object):
    """Read-only view of the interesting parts of a WSGI environ."""

    def __init__(self, environ):
        self.environ = environ                   # raw WSGI environ dict
        self.method = environ['REQUEST_METHOD']  # HTTP verb, e.g. 'GET'
        self.path = environ['PATH_INFO']         # request path component
class Response(object):
    """Mutable HTTP response state: status line plus a header mapping."""

    def __init__(self):
        self.status = "200 OK"
        self.headers = {
            'Content-Type': "text/html;charset=utf-8",
        }

    def header_list(self):
        # Materialize the headers as (name, value) pairs for start_response().
        return list(self.headers.items())

    @property
    def content_type(self):
        """Shortcut for the Content-Type header."""
        return self.headers['Content-Type']

    @content_type.setter
    def content_type(self, value):
        self.headers['Content-Type'] = value
class BaseAction(object):
    """Request handler base class with before/after hooks around dispatch."""

    def __init__(self, req, resp):
        self.req = req
        self.resp = resp

    def before_action(self):
        # Hook: runs before the action method; default is a no-op.
        pass

    def after_action(self, ex):
        # Hook: runs after the action method; *ex* is the raised
        # exception, or None on success.
        pass

    def invoke_action(self, func, kwargs):
        # Call the action function bound to this instance, passing the
        # URL path parameters as keyword arguments.
        return func(self, **kwargs)

    def handle_action(self, func, kwargs):
        # Wrap the action in the before/after hooks; any exception is
        # reported to after_action() and then re-raised.
        error = None
        try:
            self.before_action()
            return self.invoke_action(func, kwargs)
        except Exception as exc:
            error = exc
            raise
        finally:
            self.after_action(error)
class Action(BaseAction):
    """Action with JSON auto-serialization and a 405 default for every verb."""

    def invoke_action(self, func, kwargs):
        result = BaseAction.invoke_action(self, func, kwargs)
        if isinstance(result, dict):
            # dict return values are rendered as a JSON response body
            result = json.dumps(result, ensure_ascii=False)
            self.resp.content_type = "application/json"
        return result

    def _http_405(self):
        self.resp.status = "405 Method Not Allowed"
        return "<h2>405 Method Not Allowed</h2>"

    # Every HTTP verb answers 405 unless a subclass overrides it.
    def GET(self, **kwargs):
        return self._http_405()

    def POST(self, **kwargs):
        return self._http_405()

    def PUT(self, **kwargs):
        return self._http_405()

    def DELETE(self, **kwargs):
        return self._http_405()

    def PATCH(self, **kwargs):
        return self._http_405()

    def OPTIONS(self, **kwargs):
        return self._http_405()

    def TRACE(self, **kwargs):
        return self._http_405()

    def HEAD(self, **kwargs):
        # HEAD reuses GET; the framework discards the body afterwards.
        return self.GET(**kwargs)
# Every HTTP request method the framework can dispatch.
HTTP_REQUEST_METHODS = {'GET', 'POST', 'PUT', 'DELETE', 'HEAD',
                        'PATCH', 'OPTIONS', 'TRACE'}
# Sanity check: the set must match the upper-case handler names on Action.
assert HTTP_REQUEST_METHODS == { s for s in dir(Action) if s.isupper() }
class HelloAction(Action):
    """GET /hello/{name}: returns a JSON greeting."""
    def GET(self, name):             # changed: receives a URL path parameter
        msg = "Hello, %s" % name     # changed
        return {"message": msg}      # changed: dict -> JSON response
class EnvironAction(Action):
    """GET /environ: dump the WSGI environ as plain text (keys that are
    also in os.environ are omitted)."""
    def GET(self):
        environ = self.req.environ
        buf = []
        for key in sorted(environ.keys()):
            if key in os.environ:
                continue  # skip plain process environment variables
            val = environ[key]
            typ = "(%s)" % type(val).__name__
            buf.append("%-25s %-7s %r\n" % (key, typ, val))
        content = "".join(buf)
        self.resp.content_type = "text/plain;charset=utf-8"
        return content
class FormAction(Action):
    """GET /form: render a POST form; POST /form: echo the request method."""
    def GET(self):
        req_meth = self.req.method
        html = ('<p>REQUEST_METHOD: %r</p>\n'
                '<form method="POST" action="/form">\n'
                '<input type="submit">\n'
                '</form>\n')
        return html % req_meth
    def POST(self):
        req_meth = self.req.method
        html = ('<p>REQUEST_METHOD: %r</p>\n'
                '<p><a href="/form">back</p>\n')
        return html % req_meth
# URL path -> Action class routing table ('{name}' marks a path parameter).
mapping_list = [
    ('/hello/{name}', HelloAction),  # changed: now takes a path parameter
    ('/environ'     , EnvironAction),
    ('/form'        , FormAction),
]
## URLパスパターンを正規表現に変換する。
## 例: '/api/foo/{id}.json' → '^/api/foo/(?P<id>[^/]+)\\.json$'
def _convert_urlpath(urlpath): # ex: '/api/foo/{id}.json'
def _re_escape(string):
return re.escape(string).replace(r'\/', '/')
#
buf = ['^']; add = buf.append
pos = 0
for m in re.finditer(r'(.*?)\{(.*?)\}', urlpath):
pos = m.end(0) # ex: 13
string, param_name = m.groups() # ex: ('/api/foo/', 'id')
if not param_name.isidentifier():
raise ValueError("'{%s}': invalid parameter (in '%s')" \
% (param_name, urlpath))
add(_re_escape(string))
add('(?P<%s>[^/]+)' % param_name) # ex: '(?P<id>[^/]+)'
remained = urlpath[pos:] # ex: '.json'
add(_re_escape(remained))
add('$')
return "".join(buf) # ex: '^/api/foo/(?P<id>[^/]+)\\.json$'
class WSGIApplication(object):
    """WSGI entry point: routes each request to an Action class by URL."""

    def __init__(self, mapping_list):
        new_list = []
        self._build(mapping_list, new_list)
        self._mapping_list = new_list

    def _build(self, mapping_list, new_list):
        # Pre-compile each URL path pattern into a regex for fast lookup.
        for urlpath, klass in mapping_list:
            rexp = re.compile(_convert_urlpath(urlpath))
            t = (urlpath, rexp, klass)
            new_list.append(t)

    def lookup(self, req_path):
        ## Return the Action class matching the request path, together
        ## with the URL path parameter values extracted from it.
        for _, rexp, klass in self._mapping_list:
            m = rexp.match(req_path)
            if m:
                kwargs = m.groupdict()  # ex: {"id": 123}
                return klass, kwargs    # ex: UserAction, {"id": 123}
        return None, None

    def __call__(self, environ, start_response):
        req = Request(environ)
        resp = Response()
        #
        req_meth = req.method
        req_path = req.path
        klass, kwargs = self.lookup(req_path)  # changed
        #
        if klass is None:
            status = "404 Not Found"
            content = "<h2>%s</h2>" % status
        elif req_meth not in HTTP_REQUEST_METHODS:
            status = "405 Method Not Allowed"
            content = "<h2>%s</h2>" % status
        else:
            func = getattr(klass, req_meth)
            action = klass(req, resp)
            content = action.handle_action(func, kwargs)  # changed
            status = resp.status
            if req_meth == 'HEAD':
                content = ""  # HEAD: send headers only, drop the body
        #
        headers = resp.header_list()
        start_response(status, headers)
        return [content.encode('utf-8')]
# Module-level WSGI application object, picked up by WSGI servers.
wsgi_app = WSGIApplication(mapping_list)

if __name__ == "__main__":
    # Development server on http://localhost:7000/
    from wsgiref.simple_server import make_server
    wsgi_server = make_server('localhost', 7000, wsgi_app)
    wsgi_server.serve_forever()
| Python | 0.000004 | |
4b06b5ec929af3466bfe9f03892b6c68259a2e3e | add gunicorn app | gunicorn_app.py | gunicorn_app.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os

# Absolute path to the 'data' instance directory next to this file.
DATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))

# Route stdlib logging through logbook before the app is created.
from logbook.compat import redirect_logging
redirect_logging()

from aip import make
from aip.log import RedisPub

# Build the application inside the Redis log-publisher context.
with RedisPub():
    app = make(
        instance_path=DATA_PATH,
        instance_relative_config=True
    )

from werkzeug.contrib.fixers import ProxyFix
# Trust X-Forwarded-* headers set by the fronting proxy.
app.wsgi_app = ProxyFix(app.wsgi_app)
| Python | 0 | |
52236b1ad285683d828b248e462a7b984d31e636 | Add example of connecting OGR to matplotlib through shapely and numpy | examples/world.py | examples/world.py | import ogr
import pylab
from numpy import asarray
from shapely.wkb import loads
source = ogr.Open("/var/gis/data/world/world_borders.shp")
borders = source.GetLayerByName("world_borders")
fig = pylab.figure(1, figsize=(4,2), dpi=300)
while 1:
feature = borders.GetNextFeature()
if not feature:
break
geom = loads(feature.GetGeometryRef().ExportToWkb())
a = asarray(geom)
pylab.plot(a[:,0], a[:,1])
pylab.show()
| Python | 0.000002 | |
bc871956d492a3bc34e28847de136e1b4ad82035 | Create codechallenge.py | codechallenge.py | codechallenge.py | Python | 0.000004 | ||
08a813019c43288051e2ef5cbdfc6daaa0b6a32c | fix running rubyspec? | fabfile/travis.py | fabfile/travis.py | import glob
import os
from fabric.api import task, local
from fabric.context_managers import lcd
class Test(object):
    """One CI test variant: its runner callable plus what it needs set up."""

    def __init__(self, func, deps=None, needs_pypy=True, needs_rubyspec=False):
        """
        :param func: callable invoked as ``func(env)`` to run the tests.
        :param deps: pip requirement strings to install beforehand.
        :param needs_pypy: download and unpack a PyPy checkout first.
        :param needs_rubyspec: clone mspec/rubyspec first.
        """
        self.func = func
        # BUG FIX: avoid the shared mutable default argument ([]).
        self.deps = [] if deps is None else deps
        self.needs_pypy = needs_pypy
        self.needs_rubyspec = needs_rubyspec

    def install_deps(self):
        local("pip install {}".format(" ".join(self.deps)))

    def download_pypy(self):
        # Fetch and unpack the latest PyPy source, remembering its
        # absolute path in the 'pypy_marker' file for run_tests().
        local("wget https://bitbucket.org/pypy/pypy/get/default.tar.bz2 -O `pwd`/../pypy.tar.bz2")
        local("bunzip2 `pwd`/../pypy.tar.bz2")
        local("tar -xf `pwd`/../pypy.tar -C `pwd`/../")
        [path_name] = glob.glob("../pypy-pypy*")
        path_name = os.path.abspath(path_name)
        with open("pypy_marker", "w") as f:
            f.write(path_name)

    def download_mspec(self):
        with lcd(".."):
            local("git clone --depth=100 --quiet https://github.com/rubyspec/mspec")

    def download_rubyspec(self):
        with lcd(".."):
            local("git clone --depth=100 --quiet https://github.com/rubyspec/rubyspec")

    def run_tests(self):
        env = {}
        if self.needs_pypy:
            # download_pypy() stored the unpacked path in pypy_marker.
            with open("pypy_marker") as f:
                env["pypy_path"] = f.read()
        self.func(env)
@task
def install_requirements():
    """Fabric task: install everything the selected $TEST_TYPE needs."""
    t = TEST_TYPES[os.environ["TEST_TYPE"]]
    if t.deps:
        t.install_deps()
    if t.needs_pypy:
        t.download_pypy()
    if t.needs_rubyspec:
        t.download_mspec()
        t.download_rubyspec()
@task
def run_tests():
    """Fabric task: run the test variant selected by $TEST_TYPE."""
    t = TEST_TYPES[os.environ["TEST_TYPE"]]
    t.run_tests()
def run_own_tests(env):
    # Run the project's own py.test suite with PyPy on the path.
    local("PYTHONPATH=$PYTHONPATH:{pypy_path} py.test".format(**env))
def run_translate_tests(env):
    # Translate the interpreter with the JIT enabled, then run a small
    # set of rubyspec language specs against the built binary.
    rubyspec_tests = [
        "language/and_spec.rb",
        "language/not_spec.rb",
        "language/order_spec.rb",
        "language/unless_spec.rb",
    ]
    local("PYTHONPATH={pypy_path}:$PYTHONPATH python {pypy_path}/pypy/translator/goal/translate.py --batch -Ojit targetrupypy.py".format(**env))
    spec_files = " ".join(os.path.join("../rubyspec", p) for p in rubyspec_tests)
    local("../mspec/bin/mspec -t `pwd`/topaz-c {spec_files}".format(spec_files=spec_files))
def run_docs_tests(env):
    # Build the Sphinx docs; -W turns warnings into errors.
    local("sphinx-build -W -b html docs/ docs/_build/")
# Editable install of the rply dependency straight from git.
RPLY_URL = "-e git+https://github.com/alex/rply#egg=rply"

# $TEST_TYPE value -> test variant configuration.
TEST_TYPES = {
    "own": Test(run_own_tests, deps=["pytest", RPLY_URL]),
    "translate": Test(run_translate_tests, deps=[RPLY_URL], needs_rubyspec=True),
    "docs": Test(run_docs_tests, deps=["sphinx"], needs_pypy=False),
}
| import glob
import os
from fabric.api import task, local
from fabric.context_managers import lcd
class Test(object):
def __init__(self, func, deps=[], needs_pypy=True, needs_rubyspec=False):
self.func = func
self.deps = deps
self.needs_pypy = needs_pypy
self.needs_rubyspec = needs_rubyspec
def install_deps(self):
local("pip install {}".format(" ".join(self.deps)))
def download_pypy(self):
local("wget https://bitbucket.org/pypy/pypy/get/default.tar.bz2 -O `pwd`/../pypy.tar.bz2")
local("bunzip2 `pwd`/../pypy.tar.bz2")
local("tar -xf `pwd`/../pypy.tar -C `pwd`/../")
[path_name] = glob.glob("../pypy-pypy*")
path_name = os.path.abspath(path_name)
with open("pypy_marker", "w") as f:
f.write(path_name)
def download_mspec(self):
with lcd(".."):
local("git clone --depth=100 --quiet https://github.com/rubyspec/mspec")
def download_rubyspec(self):
with lcd(".."):
local("git clone --depth=100 --quiet https://github.com/rubyspec/rubyspec")
def run_tests(self):
env = {}
if self.needs_pypy:
with open("pypy_marker") as f:
env["pypy_path"] = f.read()
self.func(env)
@task
def install_requirements():
t = TEST_TYPES[os.environ["TEST_TYPE"]]
if t.deps:
t.install_deps()
if t.needs_pypy:
t.download_pypy()
if t.needs_rubyspec:
t.download_mspec()
t.download_rubyspec()
@task
def run_tests():
t = TEST_TYPES[os.environ["TEST_TYPE"]]
t.run_tests()
def run_own_tests(env):
local("PYTHONPATH=$PYTHONPATH:{pypy_path} py.test".format(**env))
def run_translate_tests(env):
rubyspec_tests = [
"language/and_spec.rb",
"language/not_spec.rb",
"language/order_spec.rb",
"language/unless_spec.rb",
]
local("PYTHONPATH={pypy_path}:$PYTHONPATH python {pypy_path}/pypy/translator/goal/translate.py --batch -Ojit targetrupypy.py".format(**env))
spec_files = " ".join(os.path.join("../rubyspec", p) for p in rubyspec_tests)
local("../mspec/bin/mspec -t topaz-c {spec_files}".format(spec_files=spec_files))
def run_docs_tests(env):
local("sphinx-build -W -b html docs/ docs/_build/")
RPLY_URL = "-e git+https://github.com/alex/rply#egg=rply"
TEST_TYPES = {
"own": Test(run_own_tests, deps=["pytest", RPLY_URL]),
"translate": Test(run_translate_tests, deps=[RPLY_URL], needs_rubyspec=True),
"docs": Test(run_docs_tests, deps=["sphinx"], needs_pypy=False),
}
| Python | 0 |
2044e3b018595e45cc2969d0675d5006ea02ccf5 | update to use new struct data of g_project | trunk/editor/savefilerooms.py | trunk/editor/savefilerooms.py | #!/usr/bin/env python
from xml.dom import minidom
from xml.etree import ElementTree
#to use OrderedDict in python < 2.7
try:
from collections import OrderedDict
except ImportError:
from misc.dict import OrderedDict
from structdata.project import g_project
def prettify(content):
    """
    Return a pretty-printed XML string for the Element.
    """
    serialized = ElementTree.tostring(content, 'utf-8')
    document = minidom.parseString(serialized)
    return document.toprettyxml(indent="  ")
def saveData(top, tag, dictionary):
    """Append a <tag> element under *top*, then recurse into list values."""
    tag_dict = {}
    dict_todo = []
    # Walk every item of the dictionary: plain values become XML
    # attributes, while lists are set aside so saveData can be called
    # recursively on their elements afterwards.
    for key, value in dictionary.items():
        if not isinstance(value, list):
            tag_dict[key] = value
        else:
            dict_todo.append(value)
    father_tag = ElementTree.SubElement(top, tag, tag_dict)
    for el in dict_todo:
        for single_el in el:
            saveData(father_tag, single_el.tag_name, single_el.dictionary())
def saveFileRooms(path_file):
    """
    Save the project data structure (g_project) to a .rooms XML file.

    :param path_file: destination path of the file to write.
    """
    top = ElementTree.Element("world",
                              g_project.data['world'].dictionary())
    for data_key, data_value in g_project.data.items():
        if data_key != "world":
            father = ElementTree.SubElement(top, data_key)
            # NOTE(review): assumes each non-world entry iterates as
            # (key, value) pairs -- confirm against g_project's structure.
            for key, value in data_value:
                saveData(father, value.tag_name,
                         value.dictionary())
    # BUG FIX: the file handle was never closed; the context manager
    # guarantees it is flushed and closed even if prettify() raises.
    with open(path_file, 'w') as write_file:
        write_file.write(prettify(top))
| #!/usr/bin/env python
from xml.dom import minidom
from xml.etree import ElementTree
#to use OrderedDict in python < 2.7
try:
from collections import OrderedDict
except ImportError:
from misc.dict import OrderedDict
from structdata.world import g_world
def prettify(content):
"""
Return a pretty-printed XML string for the Element.
"""
rough_string = ElementTree.tostring(content, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
def saveData(top, tag, dictionary):
tag_dict = {}
dict_todo = []
#cicla su tutti gli elementi del dizionario
#se trova delle liste le salva per poi richiamare se stessa
#su questi per poter memorizzare i dati
for key, value in dictionary.items():
if not isinstance(value, list):
tag_dict[key] = value
else:
dict_todo.append(value)
father_tag = ElementTree.SubElement(top, tag, tag_dict)
for el in dict_todo:
for single_el in el:
saveData(father_tag, single_el.tag_name, single_el.dictionary())
def saveFileRooms(path_file):
"""
funzione che salva la struttura dati su un file .rooms
prende in ingresso il path del file e la struttura che contiene tutti i dati
da salvare
"""
top = ElementTree.Element("world",
g_world.informations.dictionary())
for key_information in g_world.dictionary():
if key_information != "informations":
father = ElementTree.SubElement(top, key_information)
for key in g_world.__dict__[key_information]:
saveData(father, g_world.__dict__[key_information][key].tag_name,
g_world.__dict__[key_information][key].dictionary())
write_file = open(path_file, 'w')
write_file.write(prettify(top))
| Python | 0 |
93d91ba059a7037281f6a5e4d6afd5e071668d81 | Create freebook.py | freebook/reddit/freebook.py | freebook/reddit/freebook.py | # Get free ebooks from Reddit
from bs4 import BeautifulSoup
import feedparser
import requests

# RSS feed of /r/freebooks; a browser User-Agent avoids reddit's
# rate-limiting of default client strings.
url = "https://www.reddit.com/r/freebooks.rss"
headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:59.0) Gecko/20100101 Firefox/59.0"}
urls = []            # external links extracted from the feed entries
books = []           # [title, author, price, url] rows priced at $0.00
book_data_all = []   # every scraped [title, author, price, url] row

d = feedparser.parse(requests.get(url, headers=headers).text)
print(len(d.entries))
# Collect each entry's outbound "[link]" anchor (skipping reddit-internal URLs).
for e in d.entries:
    for l in BeautifulSoup(e.description, "html.parser").find_all("a"):
        if l.string == "[link]" and "reddit" not in l["href"]:
            print(e.title)
            print(l["href"])
            urls.append(l["href"])
            print()
print(urls)
print("***GETTING BOOK DATA***")
# Scrape title/author/price from each Amazon product page.
# NOTE(review): the CSS ids/classes below are tied to Amazon's markup
# and will break silently when the page layout changes.
for u in urls:
    if "amazon" in u:
        book_data = BeautifulSoup(requests.get(u, headers=headers).text, "html.parser")
        print(u)
        title = book_data.find("span", attrs={"id":"ebooksProductTitle"}).string
        if "Visit" in book_data.find("span", attrs={"class":"author notFaded"}).find("a", attrs={"class":"a-link-normal"}).string:
            author = book_data.find("span", {"class":"a-size-medium"}).text.replace("\n", "").replace("\t", "").replace("(Author)", "").strip()
        else:
            author = book_data.find("span", attrs={"class":"author notFaded"}).find("a", attrs={"class":"a-link-normal"}).string
        try:
            price = str(book_data.find("td", attrs={"class":"a-color-price"})).replace("\n", "").replace(" ", "").split(">")[1].split("<")[0]
        except TypeError:
            # fall back to the struck-through list price element
            price = book_data.find("td", attrs={"class":"a-color-base a-align-bottom a-text-strike"}).string.strip()
        try:
            book_data_all.append([title, author, price, u])
        except Exception as e:
            print(e)
            continue
print(book_data_all)
print(len(book_data_all))
# Keep only the rows whose price is exactly $0.00 (free books).
for b in book_data_all:
    if b[2] == "$0.00":
        books.append(b)
    else:
        continue
print(len(books))
print(str(len(book_data_all) - len(books)) + " paid books")
print(books)
| Python | 0 | |
6edadeb278be9b776845a12954871386ead270d4 | add tests for log rotation | plenum/test/test_log_rotation.py | plenum/test/test_log_rotation.py | import pytest
import os
import logging
import shutil
import time
from plenum.common.logging.TimeAndSizeRotatingFileHandler \
import TimeAndSizeRotatingFileHandler
def cleanFolder(path):
    """Delete *path* (if it exists) and recreate it as an empty directory.

    Returns *path* so calls can be chained into assignments."""
    # ignore_errors also removes the check-then-remove race (TOCTOU)
    # that the previous os.path.exists() guard had.
    shutil.rmtree(path, ignore_errors=True)
    os.makedirs(path, exist_ok=True)
    return path
def test_time_log_rotation():
    """A 1-second rotation interval creates one new log file per second."""
    logDirPath = cleanFolder("/tmp/plenum/test_time_log_rotation")
    logFile = os.path.join(logDirPath, "log")
    logger = logging.getLogger('test_time_log_rotation-logger')
    logger.setLevel(logging.DEBUG)
    handler = TimeAndSizeRotatingFileHandler(logFile, interval=1, when='s')
    logger.addHandler(handler)
    for i in range(3):
        time.sleep(1)
        logger.debug("line")
    assert len(os.listdir(logDirPath)) == 4  # initial + 3 new
def test_size_log_rotation():
    """Writing past maxBytes rolls the log over by size."""
    logDirPath = cleanFolder("/tmp/plenum/test_size_log_rotation")
    logFile = os.path.join(logDirPath, "log")
    logger = logging.getLogger('test_time_log_rotation-logger')
    logger.setLevel(logging.DEBUG)
    handler = TimeAndSizeRotatingFileHandler(logFile, maxBytes=21)
    logger.addHandler(handler)
    for i in range(20):
        logger.debug("line")
    assert len(os.listdir(logDirPath)) == 5
def test_time_and_size_log_rotation():
    """Size-based and time-based rollover can both trigger on one handler."""
    logDirPath = cleanFolder("/tmp/plenum/test_time_and_size_log_rotation")
    logFile = os.path.join(logDirPath, "log")
    logger = logging.getLogger('test_time_and_size_log_rotation-logger')
    logger.setLevel(logging.DEBUG)
    handler = TimeAndSizeRotatingFileHandler(logFile, maxBytes=21, interval=1, when="s")
    logger.addHandler(handler)
    # first trigger several size-based rollovers ...
    for i in range(20):
        logger.debug("line")
    # ... then several time-based ones
    for i in range(3):
        time.sleep(1)
        logger.debug("line")
    assert len(os.listdir(logDirPath)) == 8
fd75ee4a96eddc1e71eb85dd36a2c8f5b13807ca | Create RemoveLinkedListElement.py | RemoveLinkedListElement.py | RemoveLinkedListElement.py | """Remove Linked List Elements
Remove all elements from a linked list of integers that have value val.
Example
Given: 1 --> 2 --> 6 --> 3 --> 4 --> 5 --> 6, val = 6
Return: 1 --> 2 --> 3 --> 4 --> 5
"""
class ListNode(object):
    """A single node of a singly linked list."""
    def __init__(self, x):
        self.val = x      # stored value
        self.next = None  # following node, or None at the tail
class Solution(object):
    def removeElements(self, head, val):
        """
        Remove every node whose value equals *val* from the list.

        :type head: ListNode
        :type val: int
        :rtype: ListNode
        """
        # Drop matching nodes at the front so *head* points at a keeper
        # (or becomes None for an all-matching / empty list).
        while head is not None and head.val == val:
            head = head.next
        node = head
        while node is not None and node.next is not None:
            if node.next.val == val:
                node.next = node.next.next  # splice out the match
            else:
                node = node.next
        return head
| Python | 0.000001 | |
fd33fadc260cda2bd2395f027457f990ab05480b | Add migration for Registration changed | registration/migrations/0008_auto_20160418_2250.py | registration/migrations/0008_auto_20160418_2250.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-18 13:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: payment_status gains a 'deleted' choice and
    transaction_code becomes optional (blank=True, max_length=36)."""

    dependencies = [
        ('registration', '0007_auto_20160416_1217'),
    ]

    operations = [
        migrations.AlterField(
            model_name='registration',
            name='payment_status',
            field=models.CharField(choices=[('ready', 'Ready'), ('paid', 'Paid'), ('deleted', 'Deleted')], default='ready', max_length=10),
        ),
        migrations.AlterField(
            model_name='registration',
            name='transaction_code',
            field=models.CharField(blank=True, max_length=36),
        ),
    ]
| Python | 0 | |
3c1be9f8fb362699737b6dd867398e734057c300 | Add main entry point. | rave/__main__.py | rave/__main__.py | import argparse
import sys
from os import path
def parse_arguments():
    """Define rave's command-line interface and parse sys.argv."""
    parser = argparse.ArgumentParser(
        description='A modular and extensible visual novel engine.',
        prog='rave')
    parser.add_argument(
        '-b', '--bootstrapper',
        help='Select bootstrapper to bootstrap the engine with. (default: autoselect)')
    parser.add_argument(
        '-B', '--game-bootstrapper', metavar='BOOTSTRAPPER',
        help='Select bootstrapper to bootstrap the game with. (default: autoselect)')
    parser.add_argument(
        '-d', '--debug', action='store_true',
        help='Enable debug logging.')
    parser.add_argument(
        'game', metavar='GAME', nargs='?',
        help='The game to run. Format dependent on used bootstrapper.')
    return parser.parse_args()
def main():
    """Parse the CLI arguments, then bootstrap the engine and the game."""
    args = parse_arguments()

    if args.debug:
        from . import log
        log.Logger.LEVEL |= log.DEBUG

    from . import bootstrap
    bootstrap.bootstrap_engine(args.bootstrapper)
    bootstrap.bootstrap_game(args.game_bootstrapper, args.game)


# BUG FIX: main() previously ran unconditionally on import; guard it so
# importing rave.__main__ has no side effects. 'python -m rave' still
# sets __name__ to '__main__', so entry-point behavior is unchanged.
if __name__ == '__main__':
    main()
| Python | 0 | |
592b3dda603dec0765825fc8dc03fb623906cb63 | Add migration | infrastructure/migrations/0018_auto_20210928_1642.py | infrastructure/migrations/0018_auto_20210928_1642.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-09-28 14:42
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: Project.latest_implementation_year becomes a
    CASCADE-deleting foreign key to infrastructure.FinancialYear."""

    dependencies = [
        ('infrastructure', '0017_auto_20210928_1329'),
    ]

    operations = [
        migrations.AlterField(
            model_name='project',
            name='latest_implementation_year',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='infrastructure.FinancialYear'),
        ),
    ]
| Python | 0.000002 | |
ede4704704f5f6b246d70c84a16be9465cfa55e2 | Triplet with given sum | Arrays/triplet_with_given_sum.py | Arrays/triplet_with_given_sum.py | import unittest
"""
Given an unsorted array of numbers, and a value, find a triplet whose sum is equal to value.
Input: 12 3 4 1 6 9, value = 24
Output: 12 3 9
"""
"""
Approach:
1. Sort the array.
2. Scan from left to right.
3. Fix current element as potential first element of triplet.
4. Find a pair which has sum as value - current element in the remaining sorted portion of array.
"""
def find_triplet_with_given_sum(list_of_numbers, target_sum):
    """
    Return a tuple of three values from *list_of_numbers* (taken from
    distinct positions) whose sum equals *target_sum*, or None if no
    such triplet exists.
    """
    # BUG FIX: the previous version reset low/high on every inner
    # iteration (discarding the pointer adjustments) and allowed
    # i == low, i.e. it could use the same element twice (e.g. it
    # returned (1, 1, 2) for [1, 2] with target 4). The classic fix:
    # sort once, then for each fixed i walk two pointers inward.
    numbers = sorted(list_of_numbers)
    for i in range(len(numbers) - 2):
        low, high = i + 1, len(numbers) - 1
        while low < high:
            current_sum = numbers[i] + numbers[low] + numbers[high]
            if current_sum == target_sum:
                return numbers[i], numbers[low], numbers[high]
            if current_sum < target_sum:
                low += 1
            else:
                high -= 1
    return None
class TestTripletSum(unittest.TestCase):
    """Unit tests for find_triplet_with_given_sum."""

    def test_triplet_sum(self):
        list_of_numbers = [12, 3, 4, 1, 6, 9]
        triplet = find_triplet_with_given_sum(list_of_numbers, 24)
        # 12 + 3 + 9 == 24 is the expected triplet.
        self.assertEqual(len(triplet), 3)
        self.assertIn(12, triplet)
        self.assertIn(3, triplet)
        self.assertIn(9, triplet)
        # No triplet of these positive numbers can sum to a negative value.
        self.assertIsNone(find_triplet_with_given_sum(list_of_numbers, -12))
| Python | 0.999999 | |
05659cd132a5dfb54b50ec38ff1d405697de251a | Add crawler for superpoop | comics/crawler/crawlers/superpoop.py | comics/crawler/crawlers/superpoop.py | from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
    """Crawler metadata for the 'Superpoop' comic."""
    name = 'Superpoop'
    language = 'en'
    url = 'http://www.superpoop.com/'
    start_date = '2008-01-01'
    history_capable_days = 30
    schedule = 'Mo,Tu,We,Th'
    time_zone = -5
    rights = 'Drew'
class ComicCrawler(BaseComicCrawler):
    """Finds the comic image URL for self.pub_date in the RSS feed."""
    def _get_url(self):
        self.parse_feed('http://www.superpoop.com/rss/rss.php')
        for entry in self.feed.entries:
            # Match the feed entry published on the requested date.
            if self.timestamp_to_date(entry.updated_parsed) == self.pub_date:
                self.title = entry.title
                # The image URL is the quoted value following 'src=' in
                # the entry's HTML summary, found by splitting on '"'.
                pieces = entry.summary.split('"')
                for i, piece in enumerate(pieces):
                    if piece.count('src='):
                        self.url = pieces[i + 1]
                        return
| Python | 0.000053 | |
6da1f28296a8db0c18c0726dcfdc0067bebd9114 | add a script to test learned DQN | learning_tools/keras-rl/dqn/dqn_tester.py | learning_tools/keras-rl/dqn/dqn_tester.py | import numpy as np
import gym
import os
import pickle
import argparse
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy, LinearAnnealedPolicy
from rl.memory import SequentialMemory
from oscar.env.envs.general_learning_env import GeneralLearningEnv
# Environment configuration and trained-weights file for this run.
CONFIG_FILE = 'config/learning_complex.json'
WEIGHT_FILE = 'ML_homework/results/2018-04-22_16/duel_dqn_learning_complex_weights.h5f'

# Get the environment and extract the number of actions.
env = GeneralLearningEnv(CONFIG_FILE, True, log_file_path=None, publish_stats=False)
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n

# Three hidden layers of 16 ReLU units; the architecture must match the
# trained network exactly so the saved weights can be loaded.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions, activation='linear'))
print(model.summary())

memory = SequentialMemory(limit=50000, window_length=1)
boltzmann_policy = BoltzmannQPolicy(tau=1.0, clip=(0.0, 500.0))
# enable the dueling network
# you can specify the dueling_type to one of {'avg','max','naive'}
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10, policy=boltzmann_policy,
               enable_dueling_network=True, dueling_type='avg', target_model_update=1e-2)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])

# Load the previously trained weights.
dqn.load_weights(WEIGHT_FILE)

# Evaluate the loaded agent for a single episode.
dqn.test(env, nb_episodes=1, visualize=False)

env.close()
del env
| Python | 0 | |
9f6f6b727458eb331d370443074a58d1efa6d755 | Add migration for blank true. | kolibri/logger/migrations/0003_auto_20170531_1140.py | kolibri/logger/migrations/0003_auto_20170531_1140.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-31 18:40
from __future__ import unicode_literals
import kolibri.core.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: UserSessionLog.last_interaction_timestamp becomes
    nullable and may be left blank."""

    dependencies = [
        ('logger', '0002_auto_20170518_1031'),
    ]

    operations = [
        migrations.AlterField(
            model_name='usersessionlog',
            name='last_interaction_timestamp',
            field=kolibri.core.fields.DateTimeTzField(blank=True, null=True),
        ),
    ]
| Python | 0.000141 | |
d68a89b73e6ff47a2ebd169c06070815d9fd859c | Add example tests for REST API | game/tests/test_api.py | game/tests/test_api.py | # -*- coding: utf-8 -*-
# Code for Life
#
# Copyright (C) 2015, Ocado Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ADDITIONAL TERMS – Section 7 GNU General Public Licence
#
# This licence does not grant any right, title or interest in any “Ocado” logos,
# trade names or the trademark “Ocado” or any other trademarks or domain names
# owned by Ocado Innovation Limited or the Ocado group of companies or any other
# distinctive brand features of “Ocado” as may be secured from time to time. You
# must not distribute any modification of this program using the trademark
# “Ocado” or claim any affiliation or association with Ocado or its employees.
#
# You are not authorised to use the name Ocado (or any of its trade names) or
# the names of any author or contributor in advertising or for publicity purposes
# pertaining to the distribution of this program, without the prior written
# authorisation of Ocado.
#
# Any propagation, distribution or conveyance of this program must include this
# copyright notice and these terms. You must not misrepresent the origins of this
# program; modified versions of the program must be marked as such and not
# identified as the original program.
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from hamcrest import *
from hamcrest.core.base_matcher import BaseMatcher
from game.models import Decor
class APITests(APITestCase):
    """Smoke tests for the game's public REST API endpoints."""

    def test_list_decors(self):
        """Listing decors returns one entry per Decor row."""
        url = reverse('decor-list')
        response = self.client.get(url)
        assert_that(response, has_status_code(status.HTTP_200_OK))
        assert_that(response.data, has_length(len(Decor.objects.all())))

    def test_known_decor_detail(self):
        """Fetching an existing decor returns 200 with its id."""
        decor_id = 1
        url = reverse('decor-detail', kwargs={'pk': decor_id})
        response = self.client.get(url)
        assert_that(response, has_status_code(status.HTTP_200_OK))
        assert_that(response.data['id'], equal_to(decor_id))

    def test_unknown_decor_detail(self):
        """Fetching a missing decor returns 404."""
        decor_id = 0
        url = reverse('decor-detail', kwargs={'pk': decor_id})
        response = self.client.get(url)
        assert_that(response, has_status_code(status.HTTP_404_NOT_FOUND))

    def test_levels_for_known_episode(self):
        """A known episode has at least one level."""
        episode_id = 1
        url = reverse('level-for-episode', kwargs={'pk': episode_id})
        response = self.client.get(url)
        assert_that(response, has_status_code(status.HTTP_200_OK))
        assert_that(response.data, has_length(greater_than(0)))

    def test_levels_for_unknown_episode(self):
        """An unknown episode yields an empty list, not an error."""
        episode_id = 0
        url = reverse('level-for-episode', kwargs={'pk': episode_id})
        response = self.client.get(url)
        assert_that(response, has_status_code(status.HTTP_200_OK))
        assert_that(response.data, has_length(0))
def has_status_code(status_code):
    """Hamcrest-style factory for the HasStatusCode matcher."""
    return HasStatusCode(status_code)
class HasStatusCode(BaseMatcher):
    """Matches a response object whose status_code equals the expected one."""

    def __init__(self, status_code):
        self.status_code = status_code

    def _matches(self, response):
        return response.status_code == self.status_code

    def describe_to(self, description):
        # NOTE(review): append_text is passed an int status code here --
        # confirm hamcrest accepts non-string arguments.
        description.append_text('has status code ').append_text(self.status_code)

    def describe_mismatch(self, response, mismatch_description):
        mismatch_description.append_text('had status code ').append_text(response.status_code)
| Python | 0 | |
529f5ac7fe7a41dda9a9078df17b0fd27d897597 | Add another example, which was created during a user training. | pymcxray/examples/simulation_training_map.py | pymcxray/examples/simulation_training_map.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: pymcxray.examples.simulation_test_maps
:synopsis: Script to simulate mcxray maps for MM2017 with Nadi.
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
Script to simulate mcxray maps for MM2017 with Nadi.
"""
###############################################################################
# Copyright 2017 Hendrix Demers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
import logging
import os.path
# Third party modules.
import matplotlib.pyplot as plt
import h5py
import numpy as np
# Local modules.
import pymcxray.mcxray as mcxray
import pymcxray.FileFormat.Results.XrayIntensities as XrayIntensities
import pymcxray.FileFormat.Results.XraySpectraSpecimenEmittedDetected as XraySpectraSpecimenEmittedDetected
import pymcxray.FileFormat.Results.ElectronResults as ElectronResults
import pymcxray.FileFormat.Results.XraySpectraRegionsEmitted as XraySpectraRegionsEmitted
from pymcxray.SimulationsParameters import SimulationsParameters, PARAMETER_INCIDENT_ENERGY_keV, PARAMETER_NUMBER_ELECTRONS, \
PARAMETER_BEAM_POSITION_nm, PARAMETER_NUMBER_XRAYS, PARAMETER_WEIGHT_FRACTIONS
from pymcxray.Simulation import createAlloyThinFilm
import pymcxray.FileFormat.Specimen as Specimen
import pymcxray.FileFormat.Region as Region
import pymcxray.FileFormat.RegionType as RegionType
import pymcxray.FileFormat.RegionDimensions as RegionDimensions
import pymcxray.FileFormat.Element as Element
# Project modules.
from pymcxray import get_current_module_path, get_mcxray_program_name
# Globals and constants variables.
class SimulationTrainingMapsMM2017(mcxray._Simulations):
    """MCXRay map simulation of an Fe/Co (Z=26/27) thin film (MM2017 example).

    A 10x10 grid of beam positions is scanned over a 200 nm alloy film so
    the per-position x-ray intensities can be assembled into an elemental map.
    """
    def _initData(self):
        """Define the fixed and varied simulation parameters."""
        self.use_hdf5 = True
        self.delete_result_files = False
        self.createBackup = True
        # Local variables for value and list if values.
        energy_keV = 30.0
        number_electrons = 10000
        number_xrays = 10
        weight_fract = [(0.975, 1.0-0.975)] #Mass fraction of the two elements in the map
        xs_nm = np.linspace(-5.0e3, 5.0e3, 10) #Number of position acquired by side (# of pixel by side)
        # Cartesian product of xs_nm with itself -> the 100 (x, y) probe positions.
        probePositions_nm = [tuple(position_nm) for position_nm in
                             np.transpose([np.tile(xs_nm, len(xs_nm)), np.repeat(xs_nm, len(xs_nm))]).tolist()]
        # Simulation parameters
        self._simulationsParameters = SimulationsParameters()
        self._simulationsParameters.addVaried(PARAMETER_BEAM_POSITION_nm, probePositions_nm)
        self._simulationsParameters.addFixed(PARAMETER_WEIGHT_FRACTIONS, weight_fract)
        self._simulationsParameters.addFixed(PARAMETER_NUMBER_XRAYS, number_xrays)
        self._simulationsParameters.addFixed(PARAMETER_INCIDENT_ENERGY_keV, energy_keV)
        self._simulationsParameters.addFixed(PARAMETER_NUMBER_ELECTRONS, number_electrons)
    def getAnalysisName(self):
        """Base name of the analysis (also used for the hdf5 file)."""
        return "SimulationTrainingMapsMM2017_wfFe975" # Name of the hdf5 file created
    def createSpecimen(self, parameters):
        """Build the specimen: a 200 nm Fe/Co alloy thin film for this parameter set."""
        weight_fract = parameters[PARAMETER_WEIGHT_FRACTIONS]
        elements = [(26, weight_fract[0]), (27, weight_fract[1])] # (Atomic number, mass fraction)
        filmThickness_nm = 200.0
        specimen = createAlloyThinFilm(elements, filmThickness_nm)
        return specimen
    def read_one_results_hdf5(self, simulation, hdf5_group):
        """Read one simulation's result files and write them into *hdf5_group*."""
        electronResults = ElectronResults.ElectronResults()
        electronResults.path = self.getSimulationsPath()
        electronResults.basename = simulation.resultsBasename
        electronResults.read()
        electronResults.write_hdf5(hdf5_group)
        xrayIntensities = XrayIntensities.XrayIntensities()
        xrayIntensities.path = self.getSimulationsPath()
        xrayIntensities.basename = simulation.resultsBasename
        xrayIntensities.read()
        xrayIntensities.write_hdf5(hdf5_group)
        spectrum = XraySpectraRegionsEmitted.XraySpectraRegionsEmitted()
        spectrum.path = self.getSimulationsPath()
        spectrum.basename = simulation.resultsBasename
        spectrum.read()
        spectrum.write_hdf5(hdf5_group)
        spectrum = XraySpectraSpecimenEmittedDetected.XraySpectraSpecimenEmittedDetected()
        spectrum.path = self.getSimulationsPath()
        spectrum.basename = simulation.resultsBasename
        spectrum.read()
        spectrum.write_hdf5(hdf5_group)
    def analyze_results_hdf5(self): #pragma: no cover
        """Open the aggregated hdf5 file; the analysis itself is a placeholder."""
        self.readResults()
        file_path = self.get_hdf5_file_path()
        with h5py.File(file_path, 'r', driver='core') as hdf5_file:
            hdf5_group = self.get_hdf5_group(hdf5_file)
            logging.info(hdf5_group.name)
def run():
    """Configure and launch the MCXRay batch simulation set."""
    # import the batch file class.
    from pymcxray.BatchFileConsole import BatchFileConsole
    # Find the configuration file path
    configuration_file_path = get_current_module_path(__file__, "MCXRay_latest.cfg")
    program_name = get_mcxray_program_name(configuration_file_path)
    # Create the batch file object (10 batch files for the whole grid).
    batch_file = BatchFileConsole("BatchSimulationTrainingMapsMM2017", program_name, numberFiles=10)
    # Create the simulation object and add the batch file object to it.
    analyze = SimulationTrainingMapsMM2017(relativePath=r"mcxray/SimulationTrainingMapsMM2017",
                                           configurationFilepath=configuration_file_path)
    analyze.run(batch_file)
if __name__ == '__main__': #pragma: no cover
    import sys
    logging.getLogger().setLevel(logging.INFO)
    logging.info(sys.argv)
    # Default to the "read results" stage when no analysis type is given on
    # the command line; the commented alternatives are the other pipeline stages.
    if len(sys.argv) == 1:
        #sys.argv.append(mcxray.ANALYZE_TYPE_GENERATE_INPUT_FILE)
        #sys.argv.append(mcxray.ANALYZE_TYPE_CHECK_PROGRESS)
        sys.argv.append(mcxray.ANALYZE_TYPE_READ_RESULTS)
        #sys.argv.append(mcxray.ANALYZE_TYPE_ANALYZE_RESULTS)
        #sys.argv.append(mcxray.ANALYZE_TYPE_ANALYZE_SCHEDULED_READ)
    run()
| Python | 0 | |
b04e3787de29d4bee68854e15a7e783cbe3c3bd0 | Add test for microstructure generator | pymks/tests/test_microstructure_generator.py | pymks/tests/test_microstructure_generator.py | import pytest
import numpy as np
from pymks.datasets import make_microstructure
@pytest.mark.xfail
def test_size_and_grain_size_failure():
    # A grain size larger than the domain in any dimension must be rejected.
    make_microstructure(n_samples=1, size=(7, 7), grain_size=(8, 1))
@pytest.mark.xfail
def test_volume_fraction_failure():
    # Volume fractions must sum to 1; 0.3 + 0.6 should raise.
    make_microstructure(n_samples=1, volume_fraction=(0.3, 0.6))
@pytest.mark.xfail
def test_volume_fraction_with_n_phases_failure():
    # Two volume fractions cannot describe a three-phase microstructure.
    make_microstructure(n_samples=1, size=(7, 7), n_phases=3,
                        volume_fraction=(0.5, 0.5))
@pytest.mark.xfail
def test_percent_variance_exceeds_limit_failure():
    # A variance of 0.5 would push the 0.3 fractions below zero.
    make_microstructure(n_samples=1, size=(7, 7), n_phases=3,
                        volume_fraction=(0.3, 0.3, 0.4), percent_variance=0.5)
def test_volume_fraction():
    """Phase fractions of the generated microstructure match the request."""
    X = make_microstructure(n_samples=1, n_phases=3,
                            volume_fraction=(0.3, 0.2, 0.5))
    assert np.allclose(np.sum(X == 1) / float(X.size), 0.2, rtol=1e-4)
    assert np.allclose(np.sum(X == 2) / float(X.size), 0.5, atol=1e-4)
def test_percent_variance():
    """Phase fractions drift from the request when percent_variance is set.

    The expected 0.09 / 0.57 fractions come from the generator's seeded
    randomness; loose atol reflects the sampling noise.
    """
    X = make_microstructure(n_samples=1, n_phases=3,
                            volume_fraction=(0.3, 0.2, 0.5),
                            percent_variance=.2)
    # Fix: removed leftover Python-2-only `print np.sum(...)` debug
    # statements, which are syntax errors under Python 3.
    assert np.allclose(np.sum(X == 1) / float(X.size), 0.09, atol=1e-2)
    assert np.allclose(np.sum(X == 2) / float(X.size), 0.57, atol=1e-2)
if __name__ == '__main__':
    # Allow running the non-xfail checks directly without pytest.
    test_volume_fraction()
    test_percent_variance()
| Python | 0 | |
d741dd17315ffdf8935d22989ce680cbf08f46fa | Add TV plugin | plugins/tv/plugin.py | plugins/tv/plugin.py | from datetime import datetime
import logging
from twisted.internet import defer
from twisted.internet.threads import deferToThread
import requests
from cardinal.decorators import command
from cardinal.decorators import help
class ShowNotFoundException(Exception):
    """Raised when TVMaze's single-search returns no show (HTTP 404)."""
    pass
@defer.inlineCallbacks
def fetch_show(show):
    """Look up *show* on TVMaze and flatten it into a dict.

    Returns a dict with keys: name, network, country, status, schedule
    (human-readable, US Eastern), imdb_url (or None), and _links with
    next_episode/previous_episode resource URLs (or None).

    Raises:
        ShowNotFoundException: when TVMaze's single-search returns 404.
    """
    r = yield deferToThread(
        requests.get,
        "https://api.tvmaze.com/singlesearch/shows",
        params={"q": show}
    )
    if r.status_code == 404:
        raise ShowNotFoundException
    r.raise_for_status()
    data = r.json()
    # Prefer the broadcast network; fall back to a streaming ("web") channel.
    # Fix: country must default to None -- it was previously left undefined
    # when the show had neither a network nor a webChannel, raising NameError
    # at the return statement below.
    network = None
    country = None
    if data['network']:
        network = data['network']['name']
        country = data['network']['country']
        if country:
            country = country['code']
    elif data['webChannel']:
        network = data['webChannel']['name']
        country = data['webChannel']['country']
        if country:
            country = country['code']
    schedule = None
    if data['schedule']:
        schedule = ', '.join(data['schedule']['days'])
        time = data['schedule']['time']
        if time:
            # TVMaze reports a 24-hour "HH:MM"; render it as 12-hour AM/PM.
            hour, minute = time.split(":", 1)
            hour = int(hour)
            # Fix: noon and midnight were mislabeled before (12:00 ->
            # "12:00 AM", 00:30 -> "0:30 AM"); `hour % 12 or 12` maps both
            # 0 and 12 to 12 on the clock face.
            am_pm = "PM" if hour >= 12 else "AM"
            hour = hour % 12 or 12
            time = "{}:{} {}".format(hour, minute, am_pm)
            schedule += " @ {} EST".format(time)
    next_episode = data.get('_links', {}) \
        .get('nextepisode', {}) \
        .get('href', None)
    previous_episode = data.get('_links', {}) \
        .get('previousepisode', {}) \
        .get('href', None)
    imdb_url = None
    if data['externals']['imdb']:
        imdb_url = "https://imdb.com/{}".format(data['externals']['imdb'])
    return {
        'name': data['name'],
        'network': network,
        'country': country,
        'status': data['status'],
        'schedule': schedule,
        'imdb_url': imdb_url,
        '_links': {
            'next_episode': next_episode,
            'previous_episode': previous_episode,
        }
    }
@defer.inlineCallbacks
def fetch_episode(uri):
    """Fetch one TVMaze episode resource and reduce it to a small dict.

    Returns keys name, season, episode and airdate; 'airdate' is a datetime
    parsed from the ISO date, or None when TVMaze has no date yet.
    """
    r = yield deferToThread(
        requests.get,
        uri,
    )
    r.raise_for_status()
    data = r.json()
    return {
        'name': data['name'],
        'season': data['season'],
        'episode': data['number'],
        'airdate': (datetime.fromisoformat(data['airdate'])
                    if data['airdate'] else
                    None),
    }
def format_episode(data):
    """Render an episode dict as 'SxxEyy - Name [dd Mon YYYY]' (or 'TBA')."""
    if data is None:
        return 'TBA'
    season, number = data['season'], data['episode']
    if season and number:
        marker = "S{:0>2}E{:0>2}".format(season, number)
    else:
        # Specials carry a season but no episode number.
        marker = "Season {:0>2} Special".format(season)
    when = "TBA"
    if data['airdate']:
        when = data['airdate'].strftime("%d %b %Y")
    return "{} - {} [{}]".format(marker, data['name'], when)
class TVPlugin:
    """Cardinal IRC plugin: `.ep <show>` reports a show's last/next episodes."""
    def __init__(self):
        self.logger = logging.getLogger(__name__)
    @command('ep')
    @help('Get the next air date for a TV show')
    @help('Syntax: .ep <tv show>')
    @defer.inlineCallbacks
    def next_air_date(self, cardinal, user, channel, msg):
        """Handle `.ep <tv show>`: look the show up and message the channel."""
        try:
            show = msg.split(' ', 1)[1]
        except IndexError:
            cardinal.sendMsg(channel, "Syntax: .ep <tv show>")
            return
        try:
            show = yield fetch_show(show)
        except ShowNotFoundException:
            cardinal.sendMsg(
                channel,
                "Couldn't find anything for '{}'".format(show)
            )
            return
        except Exception:
            self.logger.exception("Error reaching TVMaze")
            cardinal.sendMsg(channel, "Error reaching TVMaze")
            return
        # Fetch next & previous episode info
        next_episode = None
        if show['_links']['next_episode']:
            next_episode = yield fetch_episode(
                show['_links']['next_episode'])
        previous_episode = None
        if show['_links']['previous_episode']:
            previous_episode = yield fetch_episode(
                show['_links']['previous_episode'])
        # Format header
        header = show['name']
        if show['network'] and show['country']:
            header += " [{} - {}]".format(show['country'], show['network'])
        elif show['network'] or show['country']:
            header += " [{}]".format(
                show['network'] if show['network'] else show['country']
            )
        if show['schedule']:
            header += " - {}".format(show['schedule'])
        header += " - [{}]".format(show['status'])
        # Build messages
        messages = [header]
        messages.append("Last Episode: {}".format(
            format_episode(previous_episode)
        ))
        if show['status'] != 'Ended':
            messages.append("Next Episode: {}".format(
                format_episode(next_episode)
            ))
        if show['imdb_url']:
            messages.append(show['imdb_url'])
        # Example output:
        # Show Name [Network] - [Status]
        # - or -
        # Show Name [UK - Network] - Date @ Time EST - [Status]
        # Last Episode: S05E10 - Nemesis Games [12 May 2021]
        # Next Episode: S05E11 - Mourning [19 May 2021]
        # https://imdb.com/tt1234123
        for message in messages:
            cardinal.sendMsg(channel, message)
def setup():
    """Plugin entry point used by Cardinal's plugin loader."""
    return TVPlugin()
| Python | 0 | |
74ede836ad6572c9e6c7865e5d29671a994629af | Create ManifoldWR.py | SmilNN/ManifoldWR.py | SmilNN/ManifoldWR.py | # -*- coding: utf-8 -*-
import numpy as np
import keras.backend as K
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense
from keras.layers.noise import GaussianDropout
from keras.models import Sequential
from keras.optimizers import SGD
from sklearn.datasets import load_svmlight_file
from keras.regularizers import WeightRegularizer
class ManifoldWeightRegularizer(WeightRegularizer):
    """Keras 1.x weight regularizer with an extra "manifold" smoothness term.

    On top of the inherited l1/l2 penalties it adds, with strength *m*, the
    squared difference between neighbouring output columns of the weight
    matrix, encouraging adjacent output units to carry similar weights.
    """
    def __init__(self, m=0., **kwargs):
        # m: strength of the adjacent-column smoothness penalty.
        self.m = K.cast_to_floatx(m)
        super(ManifoldWeightRegularizer, self).__init__(**kwargs)
    def __call__(self, loss):
        if not hasattr(self, 'p'):
            raise Exception('Need to call `set_param` on '
                            'WeightRegularizer instance '
                            'before calling the instance. '
                            'Check that you are not passing '
                            'a WeightRegularizer instead of an '
                            'ActivityRegularizer '
                            '(i.e. activity_regularizer="l2" instead '
                            'of activity_regularizer="activity_l2".')
        # Standard l1/l2 terms, as in the base WeightRegularizer.
        regularized_loss = loss + K.sum(K.abs(self.p)) * self.l1
        regularized_loss += K.sum(K.square(self.p)) * self.l2
        # Manifold term: dot(p, I - eye(k=1)) yields differences of adjacent
        # columns of the weight matrix p.
        # NOTE(review): zeroing diff_mat[-1, -1] makes the last product
        # column reduce to -p[:, -2]; confirm this boundary handling is
        # intended.
        out_dim = self.p.shape.eval()[-1]
        diff_mat = np.eye(out_dim) - np.eye(out_dim, k=1)
        diff_mat[-1, -1] = 0
        d = K.variable(diff_mat)
        regularized_loss += K.sum(K.square(K.dot(self.p, d))) * self.m
        return K.in_train_phase(regularized_loss, loss)
# --- data loading ---
def read_data(file):
    """Read a libsvm-format file; return (dense feature matrix, 0-based labels)."""
    x, y = load_svmlight_file(file, dtype=np.float32)
    return x.todense(), y - 1  # labels must start at 0
# NOTE: Python 2 / Keras 1.x era script (print statements, nb_epoch,
# W_regularizer keyword arguments).
train_X, train_y = read_data('Data/training_data_libsvm.txt')
test_X, test_y = read_data('Data/testing_data_libsvm.txt')
feat_dim = train_X.shape[-1]
num_class = np.max(train_y) + 1
print 'feat_dim=%d, num_class=%d' % (feat_dim, num_class)
# --- model definition: 1024-unit ReLU layer + softmax with manifold regularizer ---
model = Sequential()
model.add(Dense(1024, activation='relu', input_dim=feat_dim, init='uniform'))
model.add(GaussianDropout(0.5))
model.add(Dense(num_class, activation='softmax', W_regularizer=ManifoldWeightRegularizer(m=0.1)))
model.compile(optimizer='Adadelta',
              loss='sparse_categorical_crossentropy', # labels are raw class ids
              metrics=['accuracy'])
# Checkpoint the best-validation-accuracy weights during the long run.
mdlchk = ModelCheckpoint(filepath='weights.best.hdf5', save_best_only=True, monitor='val_acc')
model.fit(train_X, train_y, validation_data=(test_X, test_y), batch_size=100, nb_epoch=200, verbose=2, callbacks=[mdlchk]) # starts training
# Reload the best weights and fine-tune with a tiny SGD learning rate.
model.load_weights('weights.best.hdf5')
model.compile(optimizer=SGD(lr=1e-7, momentum=0.9),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_X, train_y, validation_data=(test_X, test_y), batch_size=1, nb_epoch=3, verbose=1, callbacks=[mdlchk]) # starts training
model.load_weights('weights.best.hdf5')
loss, acc = model.evaluate(test_X, test_y, batch_size=5000)
print "Loss=%.4f, ACC=%.4f" % (loss, acc)
| Python | 0.000002 | |
14a9bd6a1c5e1a5605f5161b46d65bd35f89e44a | Add foreman callback plugin (#17141) | lib/ansible/plugins/callback/foreman.py | lib/ansible/plugins/callback/foreman.py | # -*- coding: utf-8 -*-
# (C) 2015, 2016 Daniel Lobato <elobatocs@gmail.com>
# 2016 Guido Günther <agx@sigxcpu.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from datetime import datetime
from collections import defaultdict
import json
import time
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
    """
    This callback will report facts and reports to Foreman https://theforeman.org/
    It makes use of the following environment variables:
        FOREMAN_URL: URL to the Foreman server
        FOREMAN_SSL_CERT: X509 certificate to authenticate to Foreman if
            https is used
        FOREMAN_SSL_KEY: the corresponding private key
        FOREMAN_SSL_VERIFY: whether to verify the Foreman certificate
            It can be set to '1' to verify SSL certificates using the
            installed CAs or to a path pointing to a CA bundle. Set to '0'
            to disable certificate checking.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'foreman'
    CALLBACK_NEEDS_WHITELIST = True
    FOREMAN_URL = os.getenv('FOREMAN_URL', "http://localhost:3000")
    # (client certificate, private key) pair used for TLS client auth.
    FOREMAN_SSL_CERT = (os.getenv('FOREMAN_SSL_CERT',
                                  "/etc/foreman/client_cert.pem"),
                        os.getenv('FOREMAN_SSL_KEY',
                                  "/etc/foreman/client_key.pem"))
    FOREMAN_SSL_VERIFY = os.getenv('FOREMAN_SSL_VERIFY', "1")
    FOREMAN_HEADERS = {
        "Content-Type": "application/json",
        "Accept": "application/json"
    }
    TIME_FORMAT = "%Y-%m-%d %H:%M:%S %f"
    def __init__(self):
        super(CallbackModule, self).__init__()
        # Per-host list of (task name, result dict) pairs, flushed into a
        # Foreman report when the playbook finishes.
        self.items = defaultdict(list)
        self.start_time = int(time.time())
        if HAS_REQUESTS:
            requests_major = int(requests.__version__.split('.')[0])
            if requests_major >= 2:
                self.ssl_verify = self._ssl_verify()
            else:
                self._disable_plugin('The `requests` python module is too old.')
        else:
            self._disable_plugin('The `requests` python module is not installed.')
    def _disable_plugin(self, msg):
        """Turn the plugin off and warn the user why."""
        self.disabled = True
        self._display.warning(msg + ' Disabling the Foreman callback plugin.')
    def _ssl_verify(self):
        """Translate FOREMAN_SSL_VERIFY into a `requests` verify value."""
        if self.FOREMAN_SSL_VERIFY.lower() in ["1", "true", "on"]:
            verify = True
        elif self.FOREMAN_SSL_VERIFY.lower() in ["0", "false", "off"]:
            requests.packages.urllib3.disable_warnings()
            self._display.warning("SSL verification of %s disabled" %
                                  self.FOREMAN_URL)
            verify = False
        else:  # otherwise the value is treated as a path to a CA bundle
            verify = self.FOREMAN_SSL_VERIFY
        return verify
    def send_facts(self, host, data):
        """
        Sends facts to Foreman, to be parsed by foreman_ansible fact
        parser. The default fact importer should import these facts
        properly.
        """
        data["_type"] = "ansible"
        data["_timestamp"] = datetime.now().strftime(self.TIME_FORMAT)
        facts = {"name": host,
                 "facts": data,
                 }
        requests.post(url=self.FOREMAN_URL + '/api/v2/hosts/facts',
                      data=json.dumps(facts),
                      headers=self.FOREMAN_HEADERS,
                      cert=self.FOREMAN_SSL_CERT,
                      verify=self.ssl_verify)
    def _build_log(self, data):
        """Convert (source, msg) pairs into Foreman log entries.

        Failed tasks map to level 'err', changed tasks to 'notice',
        everything else to 'info'.
        """
        logs = []
        for entry in data:
            source, msg = entry
            if 'failed' in msg:
                level = 'err'
            else:
                level = 'notice' if 'changed' in msg and msg['changed'] else 'info'
            logs.append({"log": {
                'sources': {'source': source},
                'messages': {'message': json.dumps(msg)},
                'level': level
            }})
        return logs
    def send_reports(self, stats):
        """
        Send reports to Foreman to be parsed by its config report
        importer. The data is in a format that Foreman can handle
        without writing another report importer.
        """
        status = defaultdict(lambda: 0)
        metrics = {}
        # NOTE: status/metrics are reused across hosts -- each host's values
        # overwrite the previous ones before its report is posted.
        for host in stats.processed.keys():
            # NOTE(review): `sum` shadows the builtin within this loop.
            sum = stats.summarize(host)
            status["applied"] = sum['changed']
            status["failed"] = sum['failures'] + sum['unreachable']
            status["skipped"] = sum['skipped']
            log = self._build_log(self.items[host])
            metrics["time"] = {"total": int(time.time()) - self.start_time}
            now = datetime.now().strftime(self.TIME_FORMAT)
            report = {
                "report": {
                    "host": host,
                    "reported_at": now,
                    "metrics": metrics,
                    "status": status,
                    "logs": log,
                }
            }
            # To be changed to /api/v2/config_reports in 1.11. Maybe we
            # could make a GET request to get the Foreman version & do
            # this automatically.
            requests.post(url=self.FOREMAN_URL + '/api/v2/reports',
                          data=json.dumps(report),
                          headers=self.FOREMAN_HEADERS,
                          cert=self.FOREMAN_SSL_CERT,
                          verify=self.ssl_verify)
            self.items[host] = []
    def append_result(self, result):
        """Buffer one task result under its host for the final report."""
        name = result._task.get_name()
        host = result._host.get_name()
        self.items[host].append((name, result._result))
    # Ansible callback API
    def v2_runner_on_failed(self, result, ignore_errors=False):
        self.append_result(result)
    def v2_runner_on_unreachable(self, result):
        self.append_result(result)
    def v2_runner_on_async_ok(self, result, jid):
        self.append_result(result)
    def v2_runner_on_async_failed(self, result, jid):
        self.append_result(result)
    def v2_playbook_on_stats(self, stats):
        self.send_reports(stats)
    def v2_runner_on_ok(self, result):
        # Results of the 'setup' module are facts and go to the fact
        # endpoint; everything else is buffered for the report.
        res = result._result
        try:
            module = res['invocation']['module_name']
        except KeyError:
            module = None
        if module == 'setup':
            host = result._host.get_name()
            self.send_facts(host, res)
        else:
            self.append_result(result)
| Python | 0 | |
04dcdadf4f8b18405754683af0138ddc8363580e | Create followExpression.py | maya/python/animation/followExpression.py | maya/python/animation/followExpression.py | ctrlShape = cmds.createNode('locator')
# Maya rig helper: attach a joint that follows the locator controller via a
# runtime MEL expression. The controller's 0..1 'follow' attribute blends how
# strongly the joint tracks the locator's translation.
ctrlTransform = cmds.listRelatives(ctrlShape,p=True,f=True)
if isinstance(ctrlTransform,list):
    ctrlTransform = ctrlTransform[0]
jt = cmds.createNode('joint',n='followJoint')
attrName = 'follow'
# Add a keyable 'follow' attribute to the controller if it doesn't exist yet.
if not cmds.attributeQuery(attrName,n=ctrlTransform,ex=True):
    cmds.addAttr(ctrlTransform,ln=attrName,at='double',min=0.0,max=1.0,dv=0.1)
    cmds.setAttr('%s.%s'%(ctrlTransform,attrName),e=True,k=True)
# Build the MEL expression: read both translations, then offset the joint by
# a fraction $f of the controller/joint difference on every evaluation.
exp = '{\n\t$tx1 = %s.translateX;\n'%ctrlTransform
exp += '\t$ty1 = %s.translateY;\n'%ctrlTransform
exp += '\t$tz1 = %s.translateZ;\n'%ctrlTransform
exp += '\t$tx2 = %s.translateX;\n'%jt
exp += '\t$ty2 = %s.translateY;\n'%jt
exp += '\t$tz2 = %s.translateZ;\n'%jt
exp += '\t\n\t$f = %s.follow;\n'%ctrlTransform
exp += '\t$dx = $tx1;\n'
exp += '\t$dy = $ty1;\n'
exp += '\t$dz = $tz1;\n'
exp += '\tif ($f > 0.0)\n\t{\n\t\t$dx = ($tx1-$tx2)*$f;\n'
exp += '\t\t$dy = ($ty1-$ty2)*$f;\n'
exp += '\t\t$dz = ($tz1-$tz2)*$f;\n'
exp += '\t}\n\t%s.translateX += $dx;\n'%jt
exp += '\t%s.translateY += $dy;\n'%jt
exp += '\t%s.translateZ += $dz;\n'%jt
exp += '}'
cmds.expression(s=exp)
| Python | 0.000001 | |
58c62061c0c02682f96d6793b0570b455887d392 | Add pytest tools | delocate/tests/pytest_tools.py | delocate/tests/pytest_tools.py | import pytest
def assert_true(condition, msg=None):
    """Assert that *condition* is truthy.

    Generalized with an optional *msg* (backward compatible), matching the
    nose.tools helper this shim replaces. ``__tracebackhide__`` keeps this
    frame out of pytest tracebacks.
    """
    __tracebackhide__ = True
    if msg is None:
        assert condition
    else:
        assert condition, msg
def assert_false(condition, msg=None):
    """Assert that *condition* is falsy.

    Generalized with an optional *msg* (backward compatible), matching the
    nose.tools helper this shim replaces. ``__tracebackhide__`` keeps this
    frame out of pytest tracebacks.
    """
    __tracebackhide__ = True
    if msg is None:
        assert not condition
    else:
        assert not condition, msg
def assert_raises(expected_exception, *args, **kwargs):
    """Delegate to :func:`pytest.raises`.

    Usable either as a context manager (no extra args) or by passing a
    callable plus its arguments, mirroring ``nose.tools.assert_raises``.
    """
    __tracebackhide__ = True
    return pytest.raises(expected_exception, *args, **kwargs)
def assert_equal(first, second, msg=None):
    """Assert ``first == second``.

    Generalized with an optional *msg* (backward compatible), matching the
    nose.tools helper this shim replaces. ``__tracebackhide__`` keeps this
    frame out of pytest tracebacks.
    """
    __tracebackhide__ = True
    if msg is None:
        assert first == second
    else:
        assert first == second, msg
def assert_not_equal(first, second, msg=None):
    """Assert ``first != second``.

    Generalized with an optional *msg* (backward compatible), matching the
    nose.tools helper this shim replaces. ``__tracebackhide__`` keeps this
    frame out of pytest tracebacks.
    """
    __tracebackhide__ = True
    if msg is None:
        assert first != second
    else:
        assert first != second, msg
| Python | 0.000001 | |
dd93995a119323d9b67dce1f8797eb72788a044a | solve 12704 | UVA/vol-127/12704.py | UVA/vol-127/12704.py | from sys import stdin, stdout
# UVa 12704 "Little Masters": a point (x, y) lies inside a circle of radius r
# centred at the origin; print the minimum and maximum distance from the
# point to the circle's boundary, i.e. r - d and r + d where d = |(x, y)|.
I = list(map(int, stdin.read().split()))
# I[0] is the number of test cases; each case is an (x, y, r) triple.
for i in range(0, I[0]):
    [x, y, r] = I[3*i + 1: 3*i + 4]
    cd = (x*x + y*y) ** 0.5  # distance from the circle centre to the point
    stdout.write('{:.2f} {:.2f}\n'.format(r-cd, r+cd))
| Python | 0.999999 | |
08cade084a38952907b24580f597da08f850573d | Add mongo_source | mongo_source.py | mongo_source.py | #! /usr/bin/env python
# coding:utf-8
import pymongo as pm
import preprocessing
import sys
import re
import os
def twitter(
    db: str,
    coll: str,
    target: str,
    logger,
    target_dir: str="source",
    sfilter: dict={},
    delimiter: str=",",
    limit: int=0,
    verbose: bool=True,
    host: str="localhost",
    port: int=27017,
):
    """Export tweets from a MongoDB collection into plain-text corpora.

    Appends to two files under *target_dir*:
      * "<target>.txt"      -- one preprocessed tweet text per line;
      * "<target>.conv.txt" -- "original<delimiter>reply" pairs, built by
        resolving each reply's ``in_reply_to_status_id`` in the same
        collection.

    Pairs whose text contains the delimiter are skipped so the output stays
    parseable. *sfilter* is merged into the conversation query; *limit*
    caps each of the two scans (0 = no limit).

    NOTE(review): ``sfilter={}`` is a shared mutable default -- harmless as
    long as it is never mutated here; *verbose* is accepted but unused
    (logging is configured by the caller).
    """
    # mongo
    client = pm.MongoClient(host, port)
    colld = client[db][coll]
    # converter
    # do
    #     self.remove_newline,
    #     self.remove_link,
    #     self.convert_cont_spaces,
    #     self.strip
    convert = preprocessing.Preprocess()
    text_file = os.path.join(
        target_dir,
        "{}.txt".format(target)
    )
    conv_file = os.path.join(
        target_dir,
        "{}.conv.txt".format(target)
    )
    #
    # text
    textd = open(text_file, "a")
    text_count = 0
    for i, tweet in enumerate(colld.find(
        {"text": {"$exists": True}},
        timeout=False,
        limit=limit
    )):
        text = convert.execute(tweet["text"])
        if text:
            print(text, file=textd)
            text_count += 1
        if (i + 1) % 10000 == 0:
            logger.debug("text extracted: {}".format(i+1))
    textd.close()
    logger.info(
        "{}/{} texts extracted in {}".format(text_count, i+1, text_file)
    )
    #
    # conversation
    #
    convd = open(conv_file, "a")
    conv_count = 0
    flt = {"text": {"$exists": True},
           "in_reply_to_status_id": {"$ne": None}
           }
    flt.update(sfilter)
    # delimiter regex (used to reject texts containing the delimiter)
    re_del = re.compile(r"{}".format(delimiter))
    for i, tweet in enumerate(colld.find(
        flt,
        limit=limit
    )):
        try:
            origtw = colld.find_one({
                "text": {"$exists": True},
                "id": tweet["in_reply_to_status_id"]})
            if origtw:
                orig = convert.execute(origtw["text"])
                reply = convert.execute(tweet["text"])
                # Output to files
                if orig and reply and \
                        (not re_del.search(orig)) and \
                        (not re_del.search(reply)):
                    print("{}{}{}".format(orig, delimiter, reply), file=convd)
                    conv_count += 1
        except KeyboardInterrupt:
            sys.exit(1)
        except:
            # NOTE(review): bare except logs-and-continues on any malformed
            # document (intended best-effort), but would also swallow
            # SystemExit -- consider `except Exception`.
            logger.exception("error raised while extraction conversation")
        if (i + 1) % 10000 == 0:
            logger.debug("conv extracted: {}".format(i+1))
    convd.close()
    logger.info(
        "{}/{} conversations extracted in {}".format(
            conv_count, i+1, conv_file
        )
    )
if __name__ == '__main__':
    from logging import getLogger, basicConfig, DEBUG, INFO
    import argparse
    # parse arg
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "db",
        type=str,
        help="database name"
    )
    parser.add_argument(
        "coll",
        type=str,
        help="collection name"
    )
    parser.add_argument(
        "target",
        type=str,
        help="target name"
    )
    # NOTE(review): the help strings below for -t and -d say "target name";
    # they look copy-pasted -- confirm and reword.
    parser.add_argument(
        "-t", "--target-dir",
        type=str,
        nargs="?",
        default="source",
        help="target name"
    )
    parser.add_argument(
        "-d", "--delimiter",
        type=str,
        nargs="?",
        default=",",
        help="target name"
    )
    parser.add_argument(
        "-v", "--verbose",
        action="store_true",
        help="show DEBUG log"
    )
    parser.add_argument(
        "-l", "--limit",
        type=int,
        nargs="?",
        default=0,
        help="the number of extracted documents"
    )
    args = parser.parse_args()
    # log
    logger = getLogger(__name__)
    basicConfig(
        level=DEBUG if args.verbose else INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    )
    # args
    db = args.db
    coll = args.coll
    target = args.target
    target_dir = args.target_dir
    delimiter = args.delimiter
    limit = args.limit
    logger.info("processing coll {}".format(coll))
    # NOTE: the screen_name filter is hard-coded here rather than taken from
    # the command line.
    twitter(
        db=db,
        coll=coll,
        target=target,
        logger=logger,
        target_dir=target_dir,
        sfilter={"user.screen_name": "kenkov"},
        delimiter=delimiter,
        limit=limit,
    )
| Python | 0.000002 | |
3ad0f9ee142e3a08e82749f47003870f14029bff | Fix urls.py to point to web version of view | mysite/urls.py | mysite/urls.py | from django.conf.urls.defaults import *
import settings
from django.contrib import admin
admin.autodiscover()
# URL routing: search/landing pages, admin, static files, and the profile
# ("people") views including their AJAX-style mutation endpoints.
urlpatterns = patterns('',
        (r'^$', 'mysite.search.views.fetch_bugs'),
        (r'^search/$', 'mysite.search.views.fetch_bugs'),
        (r'^admin/(.*)', admin.site.root),
        (r'^static/(?P<path>.*)$', 'django.views.static.serve',
            {'document_root': settings.STATIC_DOC_ROOT}),
        (r'^people/add_contribution$', 'mysite.profile.views.add_contribution_web'),
        (r'^people/$', 'mysite.profile.views.display_person_web'),
        (r'^people/get_data_for_email$', 'mysite.profile.views.get_data_for_email'),
        (r'^people/change_what_like_working_on$',
            'mysite.profile.views.change_what_like_working_on_web'),
        (r'^people/add_tag_to_project_exp$',
            'mysite.profile.views.add_tag_to_project_exp_web'),
        (r'^people/project_exp_tag__remove$',
            'mysite.profile.views.project_exp_tag__remove__web'),
        (r'^people/make_favorite_project_exp$',
            'mysite.profile.views.make_favorite_project_exp_web'),
        (r'^people/make_favorite_exp_tag$',
            'mysite.profile.views.make_favorite_exp_tag_web'),
        (r'^people/add_contrib$',
            'mysite.profile.views.display_person_old'),
        (r'^people/sf_projects_by_person$',
            'mysite.profile.views.sf_projects_by_person_web'),
        # Experience scraper
        (r'^people/exp_scraper$',
            'mysite.profile.views.exp_scraper_display_input_form'),
        (r'^people/exp_scrape$',
            'mysite.profile.views.exp_scraper_scrape_web'),
        # Get a list of suggestions for the search input, formatted the way that
        # the jQuery autocomplete plugin wants it.
        (r'^search/get_suggestions$', 'mysite.search.views.request_jquery_autocompletion_suggestions'),
        )
# vim: set ai ts=4 sts=4 et sw=4:
| from django.conf.urls.defaults import *
import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^$', 'mysite.search.views.fetch_bugs'),
(r'^search/$', 'mysite.search.views.fetch_bugs'),
(r'^admin/(.*)', admin.site.root),
(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.STATIC_DOC_ROOT}),
(r'^people/add_contribution$', 'mysite.profile.views.add_contribution_web'),
(r'^people/$', 'mysite.profile.views.display_person_web'),
(r'^people/get_data_for_email$', 'mysite.profile.views.get_data_for_email'),
(r'^people/change_what_like_working_on$',
'mysite.profile.views.change_what_like_working_on_web'),
(r'^people/add_tag_to_project_exp$',
'mysite.profile.views.add_tag_to_project_exp_web'),
(r'^people/project_exp_tag__remove$',
'mysite.profile.views.project_exp_tag__remove__web'),
(r'^people/make_favorite_project_exp$',
'mysite.profile.views.make_favorite_project_exp_web'),
(r'^people/make_favorite_exp_tag$',
'mysite.profile.views.make_favorite_exp_tag_web'),
(r'^people/add_contrib$',
'mysite.profile.views.display_person_old'),
(r'^people/sf_projects_by_person$',
'mysite.profile.views.sf_projects_by_person_web'),
# Experience scraper
(r'^people/exp_scraper$',
'mysite.profile.views.exp_scraper_display_input_form'),
(r'^people/exp_scrape$',
'mysite.profile.views.exp_scraper_scrape'),
# Get a list of suggestions for the search input, formatted the way that
# the jQuery autocomplete plugin wants it.
(r'^search/get_suggestions$', 'mysite.search.views.request_jquery_autocompletion_suggestions'),
)
# vim: set ai ts=4 sts=4 et sw=4:
| Python | 0.000004 |
920d9db6ff993d57349dd0e3f12d53f284fd05ed | Create nasa_weblogs.py | nasa_weblogs.py | nasa_weblogs.py | #!/usr/bin/env python
import sys
import os
import re
import pandas as pd
# import modin.pandas as pd #replcaing basic Pandas with faster modin
from pyspark.context import SparkContext
from pyspark.sql.session import SparkSession
from pyspark.sql.context import SQLContext
from pyspark.sql.functions import udf
import glob
# Spark bootstrap: a single SparkContext/SparkSession for the whole script.
sc = SparkContext()
sqlContext = SQLContext(sc)
spark = SparkSession(sc)
# Load all raw NASA access logs as a one-column ('value') DataFrame of lines.
raw_data = glob.glob("data/*.log")
df = spark.read.text(raw_data)
df.printSchema()
df.show(5, truncate=False)
# Small in-driver sample used to sanity-check the regexes below.
sample_logs = [item['value'] for item in df.take(15)]
# EXTRACT HOSTS
# NOTE(review): the character class '[\S+\.]' matches the literal characters
# \S-class chars plus '+' and '.', not "one or more non-space" -- presumably
# unintended but workable on these logs; verify before reuse.
host_pattern = r'(^\S+\.[\S+\.]+\S+)\s'
hosts = [re.search(host_pattern, item).group(1)
         if re.search(host_pattern, item)
         else 'no match'
         for item in sample_logs]
# EXTRACT TIMESTAMPS (Common Log Format, e.g. [01/Jul/1995:00:00:01 -0400])
ts_pattern = r'\[(\d{2}/\w{3}/\d{4}:\d{2}:\d{2}:\d{2} -\d{4})]'
timestamps = [re.search(ts_pattern, item).group(1) for item in sample_logs]
# EXTRACT HTTP METHODS/PROTOCOLS (the quoted "METHOD URI PROTOCOL" field)
method_uri_protocol_pattern = r'\"(\S+)\s(\S+)\s*(\S*)\"'
method_uri_protocol = [re.search(method_uri_protocol_pattern, item).groups()
                       if re.search(method_uri_protocol_pattern, item)
                       else 'no match'
                       for item in sample_logs]
# EXTRACT STATUS CODES
status_pattern = r'\s(\d{3})\s'
status = [re.search(status_pattern, item).group(1) for item in sample_logs]
# EXTRACT HTTP RESPONSE CONTENT SIZE (trailing integer)
content_size_pattern = r'\s(\d+)$'
content_size = [re.search(content_size_pattern, item).group(1) for item in sample_logs]
# COMBINED ALGO
# Apply all five regexes column-wise over the full DataFrame in one select.
from pyspark.sql.functions import regexp_extract
logs_df = df.select(regexp_extract('value', host_pattern, 1).alias('host'),
                    regexp_extract('value', ts_pattern, 1).alias('timestamp'),
                    regexp_extract('value', method_uri_protocol_pattern, 1).alias('method'),
                    regexp_extract('value', method_uri_protocol_pattern, 2).alias('endpoint'),
                    regexp_extract('value', method_uri_protocol_pattern, 3).alias('protocol'),
                    regexp_extract('value', status_pattern, 1).cast('integer').alias('status'),
                    regexp_extract('value', content_size_pattern, 1).cast('integer').alias('content_size'))
logs_df.show(10, truncate=True)
print((logs_df.count(), len(logs_df.columns)))
# CHECK NULL COLUMN COUNT
(df.filter(df['value'].isNull()).count())  # NOTE: result is discarded (notebook leftover)
# Rows where any of the parsed columns failed to extract.
bad_rows_df = logs_df.filter(logs_df['host'].isNull()|
                             logs_df['timestamp'].isNull() |
                             logs_df['method'].isNull() |
                             logs_df['endpoint'].isNull() |
                             logs_df['status'].isNull() |
                             logs_df['content_size'].isNull()|
                             logs_df['protocol'].isNull())
bad_rows_df.count()
# GET COLUMNS WITH NULLS
from pyspark.sql.functions import col
from pyspark.sql.functions import sum as spark_sum
def count_null(col_name):
    """Build an aggregate expression that counts NULLs in *col_name*."""
    null_flag = col(col_name).isNull().cast('integer')
    return spark_sum(null_flag).alias(col_name)
# Build up a list of column expressions, one per column.
exprs = [count_null(col_name) for col_name in logs_df.columns]
# Run the aggregation. The *exprs converts the list of expressions into
# variable function arguments.
logs_df.agg(*exprs).show()
# HANDLE NULLS IN COLUMN - HTTP STATUS
# NOTE(review): the next line is a no-op notebook artifact -- it builds a
# Column object and immediately discards it.
regexp_extract('value', r'\s(\d{3})\s', 1).cast('integer').alias( 'status')
# Raw lines that carry no parsable status code at all.
null_status_df = df.filter(~df['value'].rlike(r'\s(\d{3})\s'))
null_status_df.count()
null_status_df.show(truncate=False)
# Re-parse just the bad lines to inspect what the other fields look like.
bad_status_df = null_status_df.select(regexp_extract('value', host_pattern, 1).alias('host'),
                                      regexp_extract('value', ts_pattern, 1).alias('timestamp'),
                                      regexp_extract('value', method_uri_protocol_pattern, 1).alias('method'),
                                      regexp_extract('value', method_uri_protocol_pattern, 2).alias('endpoint'),
                                      regexp_extract('value', method_uri_protocol_pattern, 3).alias('protocol'),
                                      regexp_extract('value', status_pattern, 1).cast('integer').alias('status'),
                                      regexp_extract('value', content_size_pattern, 1).cast('integer').alias('content_size'))
bad_status_df.show(truncate=False)
# Drop rows without a status code, then re-check the null counts.
logs_df = logs_df[logs_df['status'].isNotNull()]
exprs = [count_null(col_name) for col_name in logs_df.columns]
logs_df.agg(*exprs).show()
# NOTE(review): another discarded no-op expression (notebook artifact).
regexp_extract('value', r'\s(\d+)$', 1).cast('integer').alias('content_size')
# Raw lines without a trailing integer content size.
null_content_size_df = df.filter(~df['value'].rlike(r'\s\d+$'))
null_content_size_df.count()
null_content_size_df.take(10)
# Treat a missing content size as zero bytes, then verify no nulls remain.
logs_df = logs_df.na.fill({'content_size': 0})
exprs = [count_null(col_name) for col_name in logs_df.columns]
logs_df.agg(*exprs).show()
# Three-letter English month abbreviations -> month number (avoids any
# locale dependence a strptime-based parse would introduce).
month_map = {
    'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6, 'Jul': 7,
    'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12
}

def parse_clf_time(text):
    """Convert Common Log time format into a 'YYYY-MM-DD hh:mm:ss' string.

    Args:
        text (str): date and time in Apache time format
            [dd/mmm/yyyy:hh:mm:ss (+/-)zzzz]
    Returns:
        a string suitable for passing to CAST('timestamp')

    NOTE: the trailing time-zone offset is deliberately ignored; handle it
    upstream if the problem requires zone-correct timestamps.
    """
    day = int(text[0:2])
    month = month_map[text[3:6]]
    year = int(text[7:11])
    hour = int(text[12:14])
    minute = int(text[15:17])
    second = int(text[18:20])
    return "%04d-%02d-%02d %02d:%02d:%02d" % (
        year, month, day, hour, minute, second)
# Register the converter as a Spark UDF.
# NOTE(review): `udf` is not imported in this section -- presumably imported
# earlier in the script (pyspark.sql.functions.udf); confirm.
udf_parse_time = udf(parse_clf_time)
# Replace the raw string timestamp with a proper timestamp column 'time'.
logs_df = (logs_df\
           .select('*', udf_parse_time(logs_df['timestamp'])\
           .cast('timestamp')\
           .alias('time'))\
           .drop('timestamp'))
logs_df.show(10, truncate=True)
logs_df.printSchema()
# Cache: the cleaned frame is reused by later analyses.
logs_df.cache()
| Python | 0 | |
9d058f4b324dabf4f2cdd2ea88f40c9aabe2d622 | Add test for the Py binding of Hash. | runtime/python/test/test_hash.py | runtime/python/test/test_hash.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import inspect
import clownfish
class TestHash(unittest.TestCase):
    """Unit tests for the Python binding of Clownfish's Hash type."""

    def testStoreFetch(self):
        hmap = clownfish.Hash()
        hmap.store("foo", "bar")
        hmap.store("foo", "bar")
        self.assertEqual(hmap.fetch("foo"), "bar")
        hmap.store("nada", None)
        self.assertEqual(hmap.fetch("nada"), None)

    def testDelete(self):
        hmap = clownfish.Hash()
        hmap.store("foo", "bar")
        removed = hmap.delete("foo")
        self.assertEqual(hmap.get_size(), 0)
        self.assertEqual(removed, "bar")

    def testClear(self):
        hmap = clownfish.Hash()
        hmap.store("foo", 1)
        hmap.clear()
        self.assertEqual(hmap.get_size(), 0)

    def testHasKey(self):
        hmap = clownfish.Hash()
        hmap.store("foo", 1)
        hmap.store("nada", None)
        self.assertTrue(hmap.has_key("foo"))
        self.assertFalse(hmap.has_key("bar"))
        self.assertTrue(hmap.has_key("nada"))

    def testKeys(self):
        hmap = clownfish.Hash()
        hmap.store("a", 1)
        hmap.store("b", 1)
        self.assertEqual(sorted(hmap.keys()), ["a", "b"])

    def testValues(self):
        hmap = clownfish.Hash()
        hmap.store("foo", "a")
        hmap.store("bar", "b")
        self.assertEqual(sorted(hmap.values()), ["a", "b"])

    def testGetCapacity(self):
        hmap = clownfish.Hash(capacity=1)
        self.assertGreater(hmap.get_capacity(), 0)

    def testGetSize(self):
        hmap = clownfish.Hash()
        self.assertEqual(hmap.get_size(), 0)
        hmap.store("meep", "moop")
        self.assertEqual(hmap.get_size(), 1)

    def testEquals(self):
        hmap = clownfish.Hash()
        other = clownfish.Hash()
        hmap.store("a", "foo")
        other.store("a", "foo")
        self.assertTrue(hmap.equals(other))
        other.store("b", "bar")
        self.assertFalse(hmap.equals(other))
        self.assertTrue(hmap.equals({"a": "foo"}),
                        "equals() true against a Python dict")
        vec = clownfish.Vector()
        self.assertFalse(hmap.equals(vec),
                         "equals() false against conflicting Clownfish type")
        self.assertFalse(hmap.equals(1),
                         "equals() false against conflicting Python type")

    def testIterator(self):
        hmap = clownfish.Hash()
        hmap.store("a", "foo")
        it = clownfish.HashIterator(hmap)
        self.assertTrue(it.next())
        self.assertEqual(it.get_key(), "a")
        self.assertEqual(it.get_value(), "foo")
        self.assertFalse(it.next())
# Allow running this test module directly: `python test_hash.py`.
if __name__ == '__main__':
    unittest.main()
| Python | 0 | |
8706ec4678bc4740b64265ced63fb12d837e0297 | Add Basic Histogram Example | altair/vegalite/v2/examples/histogram.py | altair/vegalite/v2/examples/histogram.py | """
Histogram
-----------------
This example shows how to make a basic histogram, based on the vega-lite docs
https://vega.github.io/vega-lite/examples/histogram.html
"""
import altair as alt
movies = alt.load_dataset('movies')
chart = alt.Chart(movies).mark_bar().encode(
x=alt.X("IMDB_Rating",
type='quantitative',
bin=alt.BinTransform(
maxbins=10,
)),
y='count(*):Q',
)
| Python | 0 | |
0dc2417894ef1b6bd3f5386f7dfa0bb3d34a594c | Add contest calendar generation code & styles; #55 | judge/contest_calendar.py | judge/contest_calendar.py | import calendar, datetime
from judge.models import Contest, ContestParticipation, ContestProblem, Profile
class MyCal(calendar.HTMLCalendar):
def __init__(self, x):
super(MyCal, self).__init__(x)
self.today = datetime.datetime.date(datetime.datetime.now())
def formatweekday(self, day):
return '<th class="%s">%s</th>' % (self.cssclasses[day], calendar.day_name[day])
def formatday(self, day, weekday):
if day == 0:
return '<td class="noday"> </td>' # day outside month
elif day == self.today.day:
return '<td class="%s today"><span class="num">%d</span></td>' % (self.cssclasses[weekday], day)
elif day == 19:
return '<td class="%s"><span class="num">%d</span>%s</td>'
else:
c = '<ul>'
for c in Contest.objects.filter(start_time__month=self.today.month, start_time__day=day):
c += '<li class=\'%s\'><a href=\'#\'>%s</a></li>' % (
'oneday' if (c.end_time.day == day and c.end_time.month == self.today.month) else 'starting',
c.name)
for c in Contest.objects.filter(end_time__month=self.today.month, end_time__day=day):
c += '<li class=\'%s\'><a href=\'#\'>%s</a></li>' % ('ending', c.name)
c += '<ul>'
return '<td class="%s"><span class="num">%d</span>%s</td>' % (self.cssclasses[weekday], day, c)
# Render this month's calendar as an HTML fragment with inline styling.
# (Python 2 script: uses print statements.)
today = datetime.datetime.date(datetime.datetime.now())
# Inline stylesheet for the calendar table.
print '''
<head>
<style>
th.sun, th.mon, th.tue, th.wed, th.thu, th.fri, th.sat {
font-size:0.95em;
border-right:1px solid #aaa;
background:#f2f2f2;
}
th.sun {
border-left:1px solid #aaa;
}
td .num {
font-size:1.1em;
font-weight:bold;
display:block;
border-bottom:1px dashed #ccc;
padding-right:0.2em;
margin-bottom:0.4em;
}
td ul li a {
text-decoration: none;
color:#222;
}
td:hover ul li a {
font-weight: normal;
}
td ul li a:hover {
text-decoration: underline;
}
td ul {
text-decoration: none;
list-style-type: none;
text-align: left;
padding:0;
margin:0;
}
td ul li {
background-image: url('http://dev.ivybits.tk/images/bullet_diamond.png'); background-repeat: no-repeat;
background-position: 1px 1px;
padding-left:17px;
margin-bottom:0.2em;
}
td {
height:110px;
width:161px;
color:#000;
vertical-align:top;
text-align:right;
font-size:0.75em;
}
td {
border-right:1px solid #aaa;
border-bottom:1px solid #aaa;
transition-duration:0.2s;
}
td:hover {
background: rgba(0,0,255,0.3);
color:white;
}
td:hover .num {
font-weight: bold;
}
tr td:first-child {
border-left:1px solid #aaa;
}
th {
border-bottom:1px solid #aaa;
}
.noday {
background:#f1f1f1;
}
.today {
background: rgba(255,255,100,0.5);
}
</style></head>'''
cal = MyCal(calendar.SUNDAY)
# HTMLCalendar.formatmonth() calls our formatday()/formatweekday() overrides.
print cal.formatmonth(today.year, today.month)
4f1ddebb0fc185dfe4cd5167c67be8f6cea78273 | Create listenCmd.py | listenCmd.py | listenCmd.py | #!/usr/bin/python
#impoer the necessary modules
import re # the regexp module
# listen command test python file
# // THE FCNS //
# the fcn that iterate through the recognized command list to find a match with the received pseech command
def listenForCommand( theCommand ):
#for s in range( len( cmdsList ) ):
for k in commandsParams.items():
# hold the current 'loop[i]' to rung the matching process against it
#matchingCmd = re.search(r cmdsList[i], theCommand )
matchingCmd = re.search(r"say hello", theCommand )
# check if the match was successfull and end the iteraction / handle it
if matchingCmd:
print "Matching command found:"
print matchingCmd
print "Associated function:"
#print fcnsList[s]
# end the iteration as we found the command
break
else:
# continue to loop until 'cmdsList' has been fully iterated over (..)
# the settings ( commands recognized ans associated functions )
cmdsList = ["say hello", "repeat after me", "do the cleaning", "do my work"] # IndentationError: expected an indented block
fcnsList = ['sayHello', 'repeatAfterMe', 'doTheCleaning', 'doMyWork']
commandsParams = {"say hello" : "sayHello", "repeat after me" : "repeatAfterMe", "do the cleaning" : "doTheCleaning", "do my work" : "doMyWork"} # this is a dictionary
# // THE PRGM //
print "\n PRGORAM BEGIN \n"
# fake received speech on wich we iterate to find a matching command
receivedCmd = "say hello"
# try to find a match with a fake command
listenForCommand( receivedCmd )
| Python | 0.000002 | |
fd1b2885057512d6b91a2b2ed4df183e66093e61 | Create extended_iter_with_peek.py | lld_practice/extended_iter_with_peek.py | lld_practice/extended_iter_with_peek.py |
class ExtendedIter:
"""An extended iterator that wraps around an existing iterators.
It provides extra methods:
- `has_next()`: checks if we can still yield items.
- `peek()`: returns the next element of our iterator, but doesn't pass by it.
If there's nothing more to return, raises `StopIteration` error.
"""
def __init__(self, i):
self._myiter = iter(i)
self._next_element = None
self._has_next = 0
self._prime()
def has_next(self):
"""Returns true if we can call next() without raising a
StopException."""
return self._has_next
def peek(self):
"""Nonexhaustively returns the next element in our iterator."""
assert self.has_next()
return self._next_element
def next(self):
"""Returns the next element in our iterator."""
if not self._has_next:
raise StopIteration
result = self._next_element
self._prime()
return result
def _prime(self):
"""Private function to initialize the states of
self._next_element and self._has_next. We poke our
self._myiter to see if it's still alive and kicking."""
try:
self._next_element = self._myiter.next()
self._has_next = 1
except StopIteration:
self.next_element = None
self._has_next = 0
| Python | 0.000001 | |
7ce7ce4bd899e6c386de669d11a2fc5593157c91 | move processing in a mixin | pipeline/storage.py | pipeline/storage.py | import os
try:
from staticfiles import finders
from staticfiles.storage import CachedFilesMixin, StaticFilesStorage
except ImportError:
from django.contrib.staticfiles import finders
from django.contrib.staticfiles.storage import CachedFilesMixin, StaticFilesStorage
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import get_storage_class
from django.utils.functional import LazyObject
from pipeline.conf import settings
class PipelineMixin(object):
    """Storage mixin that compiles pipeline CSS/JS packages during
    collectstatic post-processing."""

    def post_process(self, paths, dry_run=False, **options):
        """Pack every configured stylesheet and javascript package, record
        the outputs in *paths*, then chain to any super post_process."""
        if dry_run:
            return []

        from pipeline.packager import Packager
        packager = Packager(storage=self)

        # CSS first, then JS -- same order as before, just table-driven.
        packers = (('css', packager.pack_stylesheets),
                   ('js', packager.pack_javascripts))
        for asset_type, pack in packers:
            for package_name in packager.packages[asset_type]:
                package = packager.package_for(asset_type, package_name)
                packed_name = pack(package)
                paths[packed_name] = (self, packed_name)

        parent = super(PipelineMixin, self)
        if hasattr(parent, 'post_process'):
            return parent.post_process(paths, dry_run, **options)

        return [(name, name, True) for name in paths]

    def get_available_name(self, name):
        """Overwrite in place: delete any existing file and reuse the name."""
        if self.exists(name):
            self.delete(name)
        return name
class PipelineStorage(PipelineMixin, StaticFilesStorage):
    """Plain static-files storage with pipeline packing mixed in."""
    pass


class PipelineCachedStorage(CachedFilesMixin, PipelineStorage):
    """Pipeline storage that also hashes file names for cache busting."""
    pass
class BaseFinderStorage(PipelineStorage):
    """Storage that resolves files through a staticfiles finders module
    before falling back to the regular storage lookup."""
    finders = None

    def __init__(self, finders=None, *args, **kwargs):
        if finders is not None:
            self.finders = finders
        if self.finders is None:
            raise ImproperlyConfigured("The storage %r doesn't have a finders class assigned." % self.__class__)
        super(BaseFinderStorage, self).__init__(*args, **kwargs)

    def path(self, name):
        """Filesystem path via the finders, else the storage default."""
        path = self.finders.find(name)
        if not path:
            path = super(BaseFinderStorage, self).path(name)
        return path

    def exists(self, name):
        # Identity comparison (`is not None`) instead of the original
        # `!= None`, per PEP 8; find() returns a path or None.
        exists = self.finders.find(name) is not None
        if not exists:
            exists = super(BaseFinderStorage, self).exists(name)
        return exists

    def listdir(self, path):
        """Return the first successful listdir() among all finder storages.

        Returns None implicitly when no storage can list the path.
        """
        for finder in finders.get_finders():
            for storage in finder.storages.values():
                try:
                    return storage.listdir(path)
                except OSError:
                    pass

    def _save(self, name, content):
        # NOTE(review): matches by substring of the directory name, which
        # can misfire for similarly named paths -- confirm intent.
        for finder in finders.get_finders():
            for path, storage in finder.list([]):
                if os.path.dirname(name) in path:
                    return storage._save(name, content)
class PipelineFinderStorage(BaseFinderStorage):
    """Finder-backed storage using the default staticfiles finders module."""
    finders = finders


class DefaultStorage(LazyObject):
    """Lazily instantiates the storage class named by PIPELINE_STORAGE."""
    def _setup(self):
        self._wrapped = get_storage_class(settings.PIPELINE_STORAGE)()

# Module-level singleton used throughout the pipeline app.
default_storage = DefaultStorage()
| import os
try:
from staticfiles import finders
from staticfiles.storage import CachedStaticFilesStorage, StaticFilesStorage
except ImportError:
from django.contrib.staticfiles import finders
from django.contrib.staticfiles.storage import CachedStaticFilesStorage, StaticFilesStorage
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import get_storage_class
from django.utils.functional import LazyObject
from pipeline.conf import settings
class PipelineStorage(StaticFilesStorage):
    """Legacy pre-mixin storage: packs pipeline packages itself in
    post_process()."""

    def get_available_name(self, name):
        # Overwrite in place instead of generating a suffixed name.
        if self.exists(name):
            self.delete(name)
        return name

    def post_process(self, paths, dry_run=False, **options):
        """Compile every CSS/JS package, register the outputs in *paths*,
        then chain to any super post_process."""
        from pipeline.packager import Packager
        if dry_run:
            return []
        packager = Packager(storage=self)
        for package_name in packager.packages['css']:
            package = packager.package_for('css', package_name)
            output_file = packager.pack_stylesheets(package)
            paths[output_file] = (self, output_file)
        for package_name in packager.packages['js']:
            package = packager.package_for('js', package_name)
            output_file = packager.pack_javascripts(package)
            paths[output_file] = (self, output_file)
        super_class = super(PipelineStorage, self)
        if hasattr(super_class, 'post_process'):
            return super_class.post_process(paths, dry_run, **options)
        return [
            (path, path, True)
            for path in paths
        ]
class PipelineCachedStorage(PipelineStorage, CachedStaticFilesStorage):
    """Cached variant: on a dry run it still invalidates the hashed-name
    cache entries for every package output so stale names are not served."""

    def post_process(self, paths, dry_run=False, **options):
        from pipeline.packager import Packager
        packager = Packager(storage=self)
        if dry_run:
            for asset_type in ['css', 'js']:
                for package_name in packager.packages[asset_type]:
                    # BUG FIX: the asset type was hard-coded to 'js', which
                    # looked up CSS package names in the JS registry.
                    package = packager.package_for(asset_type, package_name)
                    paths[package.output_filename] = (self, package.output_filename)
            self.cache.delete_many([self.cache_key(path) for path in paths])
            return []
        return super(PipelineCachedStorage, self).post_process(paths, dry_run, **options)
class BaseFinderStorage(PipelineStorage):
    """Storage that resolves files through a staticfiles finders module
    before falling back to the regular storage lookup."""
    finders = None

    def __init__(self, finders=None, *args, **kwargs):
        if finders is not None:
            self.finders = finders
        if self.finders is None:
            raise ImproperlyConfigured("The storage %r doesn't have a finders class assigned." % self.__class__)
        super(BaseFinderStorage, self).__init__(*args, **kwargs)

    def path(self, name):
        """Filesystem path via the finders, else the storage default."""
        path = self.finders.find(name)
        if not path:
            path = super(BaseFinderStorage, self).path(name)
        return path

    def exists(self, name):
        # Identity comparison (`is not None`) instead of the original
        # `!= None`, per PEP 8; find() returns a path or None.
        exists = self.finders.find(name) is not None
        if not exists:
            exists = super(BaseFinderStorage, self).exists(name)
        return exists

    def listdir(self, path):
        """Return the first successful listdir() among all finder storages.

        Returns None implicitly when no storage can list the path.
        """
        for finder in finders.get_finders():
            for storage in finder.storages.values():
                try:
                    return storage.listdir(path)
                except OSError:
                    pass

    def _save(self, name, content):
        # NOTE(review): matches by substring of the directory name, which
        # can misfire for similarly named paths -- confirm intent.
        for finder in finders.get_finders():
            for path, storage in finder.list([]):
                if os.path.dirname(name) in path:
                    return storage._save(name, content)
class PipelineFinderStorage(BaseFinderStorage):
    """Finder-backed storage using the default staticfiles finders module."""
    finders = finders


class DefaultStorage(LazyObject):
    """Lazily instantiates the storage class named by PIPELINE_STORAGE."""
    def _setup(self):
        self._wrapped = get_storage_class(settings.PIPELINE_STORAGE)()

# Module-level singleton used throughout the pipeline app.
default_storage = DefaultStorage()
77e980157f51af421eceb7c7b7a84945d8d33a91 | Convert caffemodel of FCN8s to chainer model | scripts/caffe_to_chainermodel.py | scripts/caffe_to_chainermodel.py | #!/usr/bin/env python
from __future__ import print_function
import argparse
import os.path as osp
import caffe
import chainer.functions as F
import chainer.serializers as S
import fcn
from fcn.models import FCN8s
data_dir = fcn.get_data_dir()
caffemodel = osp.join(data_dir, 'voc-fcn8s/fcn8s-heavy-pascal.caffemodel')
caffe_prototxt = osp.join(data_dir, 'voc-fcn8s/deploy.prototxt')
chainermodel = osp.join(data_dir, 'fcn8s.chainermodel')
net = caffe.Net(caffe_prototxt, caffemodel, caffe.TEST)
# TODO(pfnet): chainer CaffeFunction not support some layers
# from chainer.functions.caffe import CaffeFunction
# func = CaffeFunction(caffemodel)
model = FCN8s()
for name, param in net.params.iteritems():
layer = getattr(model, name)
has_bias = True
if len(param) == 1:
has_bias = False
print('{0}:'.format(name))
# weight
print(' - W:', param[0].data.shape, layer.W.data.shape)
assert param[0].data.shape == layer.W.data.shape
layer.W.data = param[0].data
# bias
if has_bias:
print(' - b:', param[1].data.shape, layer.b.data.shape)
assert param[1].data.shape == layer.b.data.shape
layer.b.data = param[1].data
S.save_hdf5(chainermodel, model)
| Python | 0.999999 | |
a8423d5759a951b7f8d765203e3a02a6d3211f35 | add body task generator | neurolabi/python/flyem/BodyTaskManager.py | neurolabi/python/flyem/BodyTaskManager.py | '''
Created on Sep 18, 2013
@author: zhaot
'''
import os;
class ExtractBodyTaskManager:
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
self.commandPath = '';
self.minSize = 0;
self.maxSize = -1;
self.overwriteLevel = 1
self.zOffset = 0
self.bodyMapDir = ''
self.output = ''
self.bodysizeFile = ''
self.jobNumber = 5
def setCommandPath(self, path):
self.commandPath = path;
def setRange(self, bodySizeRange):
self.minSize = bodySizeRange[0];
self.maxSize = bodySizeRange[1];
def setOverwriteLevel(self, level):
self.overwriteLevel = level;
def setZOffset(self, offset):
self.zOffset = offset;
def setJobNumber(self, n):
self.jobNumber = n;
def setOutput(self, output):
self.output = output;
def setBodyMapDir(self, inputBodyMap):
self.bodyMapDir = inputBodyMap
def setBodySizeFile(self, filePath):
self.bodysizeFile = filePath
def useCluster(self, using):
self.usingCluster = using;
def getFullCommand(self):
command = self.commandPath + ' ' + self.bodyMapDir + ' -o ' + self.output + \
' --sobj ' + ' --minsize ' + str(self.minSize);
if self.maxSize >= self.minSize:
command += ' --maxsize ' + str(self.maxSize)
command += ' --overwrite_level ' + str(self.overwriteLevel);
if self.bodysizeFile:
command += ' --bodysize_file ' + self.bodysizeFile
command += ' --z_offset ' + str(self.zOffset)
return command;
def generateScript(self, outputDir):
script = self.output
scriptFile = open(script, 'w')
if scriptFile:
scriptFile.write(self.getFullCommand())
scriptFile.close()
if __name__ == '__main__':
    # Demo: build a command line against a local neutube checkout
    # (Python 2 script: uses a print statement).
    from os.path import expanduser
    home = expanduser("~")
    taskManager = ExtractBodyTaskManager()
    taskManager.setBodyMapDir('../body_maps')
    taskManager.setOutput('.')
    taskManager.setRange([100000, -1])  # no upper size bound
    taskManager.setOverwriteLevel(1)
    taskManager.setBodySizeFile('bodysize.txt')
    taskManager.setZOffset(1490)
    taskManager.setCommandPath(home + '/Work/neutube/neurolabi/cpp/'
        'extract_body-build-Qt_4_8_1_gcc-Debug/extract_body');
    print taskManager.getFullCommand();
| Python | 0.000008 | |
18935881745b7bc65741837d63ec60e9d62583f1 | Split the big file into smaller pieces | face_track/sound_track.py | face_track/sound_track.py | #
# sound_track.py - Tracking of sound sources
# Copyright (C) 2014,2015,2016 Hanson Robotics
# Copyright (C) 2015,2016 Linas Vepstas
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import rospy
import logging
from std_msgs.msg import Int32
from face_atomic import FaceAtomic
from geometry_msgs.msg import PoseStamped # for sound localization
logger = logging.getLogger('hr.eva_behavior.sound_track')
# Thin python wrapper, to subscribe to ManyEars sound-source ROS
# messages, and then re-wrap these as opencog atoms, via FaceAtomic,
#a and forward them on into the OpenCog space-time server.
#
class SoundTrack:
    """Subscribes to ManyEars sound-source poses and forwards the strongest
    source's camera-frame location into OpenCog via FaceAtomic."""

    def __init__(self):
        rospy.init_node("OpenCog_Facetracker")
        logger.info("Starting OpenCog Face Tracker ROS Node")

        # The OpenCog API. This is used to send face data to OpenCog.
        self.atomo = FaceAtomic()

        # Sound localization: 4x4 affine matrix (rotation + translation)
        # mapping microphone coordinates to camera coordinates.
        parameter_name = "sound_localization/mapping_matrix"
        if rospy.has_param(parameter_name):
            self.sl_matrix = rospy.get_param(parameter_name)
            rospy.Subscriber("/manyears/source_pose", PoseStamped, \
                self.sound_cb)

    # ---------------------------------------------------------------
    def sound_cb(self, msg):
        """Convert the sound-source position to camera coordinates and push
        it into the OpenCog space server (~30 Hz from ManyEars).

        A typical sl_matrix looks like this:

          0.943789  0.129327  0.304204   0.00736024
         -0.131484  0.991228 -0.0134787  0.00895614
         -0.303278 -0.0272767 0.952513   0.0272001
          0         0         0          1
        """
        # Homogeneous source vector: the trailing 1 picks up the matrix's
        # translation column.
        vs = [msg.pose.position.x, \
              msg.pose.position.y, \
              msg.pose.position.z, \
              1]
        r = [0, 0, 0, 0]
        for i in range(0, 3):
            # BUG FIX: the inner loop previously stopped at column 2, so the
            # translation part of the affine transform was never applied
            # even though vs carries the homogeneous 1.
            for j in range(0, 4):
                r[i] += self.sl_matrix[i][j] * vs[j]
        self.atomo.update_sound(r[0], r[1], r[2])
# ----------------------------------------------------------
| Python | 0.999531 | |
b41444b5f7c48c4bc46a49405f7b053dcb8ea66c | rename resource function | into/backends/sas.py | into/backends/sas.py | from __future__ import absolute_import, division, print_function
import sas7bdat
from sas7bdat import SAS7BDAT
import datashape
from datashape import discover, dshape
from collections import Iterator
import pandas as pd
import sqlalchemy as sa
from .sql import dshape_to_alchemy, dshape_to_table
from ..append import append
from ..convert import convert
from ..resource import resource
# Map sas7bdat column kinds to datashape measure names.
SAS_type_map = {'number': 'float64',
                'string': 'string'}


@resource.register('.+\.(sas7bdat)')
def resource_sas(uri, **kwargs):
    """Open a .sas7bdat file as a SAS7BDAT handle (resource dispatcher)."""
    return SAS7BDAT(uri, **kwargs)


@discover.register(SAS7BDAT)
def discover_sas(f, **kwargs):
    """Build a datashape ('var * {col: type, ...}') from the SAS header."""
    cols = [col.name.decode("utf-8") for col in f.header.parent.columns]
    types = [SAS_type_map[col.type] for col in f.header.parent.columns]
    measure = ",".join(col + ":" + _type for col, _type in zip(cols, types))
    ds = "var * {" + measure + "}"
    return dshape(ds)
@convert.register(pd.DataFrame, SAS7BDAT, cost=4.0)
def sas_to_DataFrame(s, dshape=None, **kwargs):
    """Load the whole SAS file into a pandas DataFrame."""
    return s.to_data_frame()


@convert.register(list, SAS7BDAT, cost=8.0)
def sas_to_list(s, dshape=None, **kwargs):
    """Materialize all data rows (header row skipped) as a list."""
    s.skip_header = True
    return list(s.readlines())


@convert.register(Iterator, SAS7BDAT, cost=1.0)
def sas_to_iterator(s):
    """Stream data rows lazily (header row skipped)."""
    s.skip_header = True
    return s.readlines()


@append.register(sa.Table, SAS7BDAT)
def append_sas_to_table(t, s, **kwargs):
    """Bulk-append all SAS rows into a SQLAlchemy table."""
    append(t, sas_to_iterator(s), **kwargs)


def sas_to_table(s, metadata=None):
    """Create a SQLAlchemy table definition matching the SAS dataset."""
    ds = discover_sas(s)
    name = s.header.properties.name.decode("utf-8")
    return dshape_to_table(name, ds, metadata)
| from __future__ import absolute_import, division, print_function
import sas7bdat
from sas7bdat import SAS7BDAT
import datashape
from datashape import discover, dshape
from collections import Iterator
import pandas as pd
import sqlalchemy as sa
from .sql import dshape_to_alchemy, dshape_to_table
from ..append import append
from ..convert import convert
from ..resource import resource
# Map sas7bdat column kinds to datashape measure names.
SAS_type_map = {'number': 'float64',
                'string': 'string'}


@resource.register('.+\.(sas7bdat)')
def resource_csv(uri, **kwargs):
    # NOTE(review): the function name says "csv" but this resource opens
    # SAS files -- presumably a copy/paste leftover. Only the registered
    # pattern matters to the dispatcher, not the function name.
    return SAS7BDAT(uri, **kwargs)


@discover.register(SAS7BDAT)
def discover_sas(f, **kwargs):
    """Build a datashape ('var * {col: type, ...}') from the SAS header."""
    cols = [col.name.decode("utf-8") for col in f.header.parent.columns]
    types = [SAS_type_map[col.type] for col in f.header.parent.columns]
    measure = ",".join(col + ":" + _type for col, _type in zip(cols, types))
    ds = "var * {" + measure + "}"
    return dshape(ds)
@convert.register(pd.DataFrame, SAS7BDAT, cost=4.0)
def sas_to_DataFrame(s, dshape=None, **kwargs):
    """Load the whole SAS file into a pandas DataFrame."""
    return s.to_data_frame()


@convert.register(list, SAS7BDAT, cost=8.0)
def sas_to_list(s, dshape=None, **kwargs):
    """Materialize all data rows (header row skipped) as a list."""
    s.skip_header = True
    return list(s.readlines())


@convert.register(Iterator, SAS7BDAT, cost=1.0)
def sas_to_iterator(s):
    """Stream data rows lazily (header row skipped)."""
    s.skip_header = True
    return s.readlines()


@append.register(sa.Table, SAS7BDAT)
def append_sas_to_table(t, s, **kwargs):
    """Bulk-append all SAS rows into a SQLAlchemy table."""
    append(t, sas_to_iterator(s), **kwargs)


def sas_to_table(s, metadata=None):
    """Create a SQLAlchemy table definition matching the SAS dataset."""
    ds = discover_sas(s)
    name = s.header.properties.name.decode("utf-8")
    return dshape_to_table(name, ds, metadata)
| Python | 0.000005 |
4e1d611a06874d478e91185a0349cfc3747e36ab | Create __init__.py | bin/map/__init__.py | bin/map/__init__.py | Python | 0.000429 | ||
7f4079c30bf5a693f1ccad38109bbfc83a076f22 | Add palette utilities | bingraphvis/util.py | bingraphvis/util.py | #generated using palettable
PALETTES = {
'grays' : ['#FFFFFD', '#D6D6D4', '#B1B1B0', '#908F8F', '#727171', '#545453', '#373737', '#1A1919', '#000000'],
'greens' : ['#F7FCF5', '#E5F5E0', '#C7E9C0', '#A1D99B', '#74C476', '#41AB5D', '#238B45', '#006D2C', '#00441B'],
'purples': ['#FCFBFD', '#EFEDF5', '#DADAEB', '#BCBDDC', '#9E9AC8', '#807DBA', '#6A51A3', '#54278F', '#3F007D'],
'blues' : ['#F7FBFF', '#DEEBF7', '#C6DBEF', '#9ECAE1', '#6BAED6', '#4292C6', '#2171B5', '#08519C', '#08306B'],
'reds' : ['#FFF5F0', '#FEE0D2', '#FCBBA1', '#FC9272', '#FB6A4A', '#EF3B2C', '#CB181D', '#A50F15', '#67000D']
}
try:
from palettable.colorbrewer.sequential import *
from palettable.cmocean.sequential import *
PALETTES.update({
'greens' : Greens_9.hex_colors,
'blues' : Blues_9.hex_colors,
'purples': Purples_9.hex_colors,
'reds' : Reds_9.hex_colors,
'grays' : Gray_9_r.hex_colors,
'algae' : Algae_8.hex_colors,
'solar' : Solar_9_r.hex_colors
})
except Exception,e:
print e
pass
print PALETTES
def get_palette(name):
return PALETTES[name]
def get_palette_names():
return PALETTES.keys()
| Python | 0 | |
aed1f0e4e33dd956f4499ecffd6bf50bb58e7df4 | Add fermi.py | scripts/fermi.py | scripts/fermi.py | # This example file is part of the ENVISIoN Electronic structure visualization studio
#
# Load this file into the Inviwo Python Editor (which you can access under the menu Python,
# which is available if Inviwo has been compiled with the Python module on)
#
# For Copyright and License information see the file LICENSE distributed alongside ENVISIoN
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os, sys

# Configuration: adjust these to the local ENVISIoN checkout and VASP data.
PATH_TO_ENVISION=os.path.expanduser("~/ENVISIoN/envision")
PATH_TO_VASP_CALC=os.path.expanduser("~/ENVISIoN/data/Cu/1/11")
PATH_TO_HDF5=os.path.expanduser("/tmp/envision_demo.hdf5")

# Make the ENVISIoN package importable without installing it.
sys.path.insert(0, os.path.expanduser(PATH_TO_ENVISION)) # Or `pip install --editable`.
import envision
import envision.inviwo

# Parse the VASP output into the HDF5 file, then build the Inviwo network.
envision.parser.vasp.fermi(PATH_TO_HDF5, PATH_TO_VASP_CALC)
# x position handed to the Inviwo fermi visualization.
xpos=0
envision.inviwo.fermi(PATH_TO_HDF5, xpos)
| Python | 0.000022 | |
9ffa7ab2b4b5fb03d9cd8dd2740234ebaf8c8097 | Add per client ignore exception option. | redis_cache/cache.py | redis_cache/cache.py | # -*- coding: utf-8 -*-
from django.conf import settings
from django.core.cache.backends.base import BaseCache
from django.core.exceptions import ImproperlyConfigured
from django.core.cache import get_cache
from .util import load_class
from .exceptions import ConnectionInterrupted
import functools
# Global fallback for the per-client IGNORE_EXCEPTIONS option: when True,
# connection errors are swallowed instead of raised.
DJANGO_REDIS_IGNORE_EXCEPTIONS = getattr(settings,
    "DJANGO_REDIS_IGNORE_EXCEPTIONS", False)
def omit_exception(method):
    """
    Simple decorator that intercepts connection
    errors and ignores these if settings specify this.

    Note: this doesn't handle the `default` argument in .get().
    """
    @functools.wraps(method)
    def _decorator(self, *args, **kwargs):
        # Fast path: exceptions propagate when the per-client flag is off.
        if not self._ignore_exceptions:
            return method(self, *args, **kwargs)
        try:
            return method(self, *args, **kwargs)
        except ConnectionInterrupted:
            return None
    return _decorator
class RedisCache(BaseCache):
    """Django cache backend that delegates to a pluggable redis client
    class; connection errors are optionally swallowed per client."""

    def __init__(self, server, params):
        super(RedisCache, self).__init__(params)
        self._server = server
        self._params = params

        options = params.get("OPTIONS", {})
        self._client_cls = options.get("CLIENT_CLASS", "redis_cache.client.DefaultClient")
        self._client_cls = load_class(self._client_cls)
        self._client = None

        # Per-client override of the global setting (consumed by
        # omit_exception).
        self._ignore_exceptions = options.get("IGNORE_EXCEPTIONS", DJANGO_REDIS_IGNORE_EXCEPTIONS)

    @property
    def client(self):
        """
        Lazy client connection property.
        """
        if self._client is None:
            self._client = self._client_cls(self._server, self._params, self)
        return self._client

    @property
    def raw_client(self):
        """
        Return a raw redis client (connection). Not all
        pluggable clients supports this feature. If not supports
        this raises NotImplementedError
        """
        return self.client.get_client(write=True)

    @omit_exception
    def set(self, *args, **kwargs):
        return self.client.set(*args, **kwargs)

    @omit_exception
    def incr_version(self, *args, **kwargs):
        return self.client.incr_version(*args, **kwargs)

    @omit_exception
    def add(self, *args, **kwargs):
        return self.client.add(*args, **kwargs)

    @omit_exception
    def get(self, key, default=None, version=None, client=None):
        # Unlike the other methods, .get() must return `default` (not None)
        # when the connection fails. CONSISTENCY FIX: consult the per-client
        # flag instead of the global DJANGO_REDIS_IGNORE_EXCEPTIONS, matching
        # the option introduced in __init__.
        try:
            return self.client.get(key, default=default, version=version,
                                   client=client)
        except ConnectionInterrupted:
            if self._ignore_exceptions:
                return default
            raise

    @omit_exception
    def delete(self, *args, **kwargs):
        return self.client.delete(*args, **kwargs)

    @omit_exception
    def delete_pattern(self, *args, **kwargs):
        return self.client.delete_pattern(*args, **kwargs)

    @omit_exception
    def delete_many(self, *args, **kwargs):
        return self.client.delete_many(*args, **kwargs)

    @omit_exception
    def clear(self):
        return self.client.clear()

    @omit_exception
    def get_many(self, *args, **kwargs):
        return self.client.get_many(*args, **kwargs)

    @omit_exception
    def set_many(self, *args, **kwargs):
        return self.client.set_many(*args, **kwargs)

    @omit_exception
    def incr(self, *args, **kwargs):
        return self.client.incr(*args, **kwargs)

    @omit_exception
    def decr(self, *args, **kwargs):
        return self.client.decr(*args, **kwargs)

    @omit_exception
    def has_key(self, *args, **kwargs):
        return self.client.has_key(*args, **kwargs)

    @omit_exception
    def keys(self, *args, **kwargs):
        return self.client.keys(*args, **kwargs)

    @omit_exception
    def close(self, **kwargs):
        self.client.close(**kwargs)
| # -*- coding: utf-8 -*-
from django.conf import settings
from django.core.cache.backends.base import BaseCache
from django.core.exceptions import ImproperlyConfigured
from django.core.cache import get_cache
from .util import load_class
from .exceptions import ConnectionInterrupted
import functools
DJANGO_REDIS_IGNORE_EXCEPTIONS = getattr(settings,
"DJANGO_REDIS_IGNORE_EXCEPTIONS", False)
def omit_exception(method):
    """
    Simple decorator that intercepts connection
    errors and ignores these if settings specify this.

    Note: this doesn't handle the `default` argument in .get().
    """
    # The decision is taken once, at decoration (import) time: when the
    # global setting is off, the original method is returned untouched,
    # so there is zero per-call overhead.
    if not DJANGO_REDIS_IGNORE_EXCEPTIONS:
        return method

    @functools.wraps(method)
    def _decorator(self, *args, **kwargs):
        try:
            return method(self, *args, **kwargs)
        except ConnectionInterrupted:
            # Swallow connection problems and behave like a cache miss.
            return None
    return _decorator
class RedisCache(BaseCache):
    """
    Django cache backend backed by Redis.

    Every operation is delegated to a pluggable client class
    (``OPTIONS["CLIENT_CLASS"]``, instantiated lazily).  Connection
    errors are optionally swallowed by @omit_exception, controlled by
    the global ``DJANGO_REDIS_IGNORE_EXCEPTIONS`` setting.
    """

    def __init__(self, server, params):
        super(RedisCache, self).__init__(params)
        self._server = server
        self._params = params

        options = params.get("OPTIONS", {})
        # CLIENT_CLASS is a dotted path, resolved to a class here.
        self._client_cls = options.get("CLIENT_CLASS", "redis_cache.client.DefaultClient")
        self._client_cls = load_class(self._client_cls)
        self._client = None

    @property
    def client(self):
        """
        Lazy client connection property.
        """
        if self._client is None:
            self._client = self._client_cls(self._server, self._params, self)
        return self._client

    @property
    def raw_client(self):
        """
        Return a raw redis client (connection). Not all
        pluggable clients supports this feature. If not supports
        this raises NotImplementedError
        """
        return self.client.get_client(write=True)

    @omit_exception
    def set(self, *args, **kwargs):
        return self.client.set(*args, **kwargs)

    @omit_exception
    def incr_version(self, *args, **kwargs):
        return self.client.incr_version(*args, **kwargs)

    @omit_exception
    def add(self, *args, **kwargs):
        return self.client.add(*args, **kwargs)

    @omit_exception
    def get(self, key, default=None, version=None, client=None):
        # Handled explicitly so a swallowed connection error yields
        # `default` rather than the decorator's None.
        try:
            return self.client.get(key, default=default, version=version,
                                   client=client)
        except ConnectionInterrupted:
            if DJANGO_REDIS_IGNORE_EXCEPTIONS:
                return default
            raise

    @omit_exception
    def delete(self, *args, **kwargs):
        return self.client.delete(*args, **kwargs)

    @omit_exception
    def delete_pattern(self, *args, **kwargs):
        return self.client.delete_pattern(*args, **kwargs)

    @omit_exception
    def delete_many(self, *args, **kwargs):
        return self.client.delete_many(*args, **kwargs)

    @omit_exception
    def clear(self):
        return self.client.clear()

    @omit_exception
    def get_many(self, *args, **kwargs):
        return self.client.get_many(*args, **kwargs)

    @omit_exception
    def set_many(self, *args, **kwargs):
        return self.client.set_many(*args, **kwargs)

    @omit_exception
    def incr(self, *args, **kwargs):
        return self.client.incr(*args, **kwargs)

    @omit_exception
    def decr(self, *args, **kwargs):
        return self.client.decr(*args, **kwargs)

    @omit_exception
    def has_key(self, *args, **kwargs):
        return self.client.has_key(*args, **kwargs)

    @omit_exception
    def keys(self, *args, **kwargs):
        return self.client.keys(*args, **kwargs)

    @omit_exception
    def close(self, **kwargs):
        self.client.close(**kwargs)
| Python | 0 |
b4fd94008fa5b1dcdb6dd61651d8776dfb41f2d6 | Make sure we return a list. | oscar/apps/dashboard/catalogue/widgets.py | oscar/apps/dashboard/catalogue/widgets.py | import six
from django.forms.util import flatatt
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django import forms
class ProductSelect(forms.Widget):
    """
    Hidden-input widget that the select2 javascript (hooked on the
    ``select2`` css class) turns into a single-product AJAX search box.
    """

    # Toggled by subclasses to allow selecting several products.
    is_multiple = False
    css = 'select2 input-xlarge'

    def format_value(self, value):
        """Return the text rendered into the input's value attribute."""
        return six.text_type(value or '')

    def value_from_datadict(self, data, files, name):
        """Extract this widget's value from the submitted form data."""
        value = data.get(name, None)
        if value is None:
            return value
        else:
            return six.text_type(value)

    def render(self, name, value, attrs=None, choices=()):
        """Render a hidden <input> carrying the data-* attributes select2 reads."""
        attrs = self.build_attrs(attrs, **{
            'type': 'hidden',
            'class': self.css,
            'name': name,
            'data-ajax-url': reverse('dashboard:catalogue-product-lookup'),
            'data-multiple': 'multiple' if self.is_multiple else '',
            'value': self.format_value(value),
            'data-required': 'required' if self.is_required else '',
        })
        return mark_safe(u'<input %s>' % flatatt(attrs))
class ProductSelectMultiple(ProductSelect):
    """Variant of ProductSelect that lets the user pick several products;
    the selection travels through the form as a comma separated string."""

    is_multiple = True
    css = 'select2 input-xxlarge'

    def format_value(self, value):
        """Serialise the selected values as a comma separated string."""
        if not value:
            return ''
        return ','.join(six.text_type(item) for item in value if item)

    def value_from_datadict(self, data, files, name):
        """Split the submitted string back into a list of values."""
        raw = data.get(name, None)
        if raw is None:
            return []
        return [part for part in raw.split(',') if part]
| import six
from django.forms.util import flatatt
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django import forms
class ProductSelect(forms.Widget):
    """
    Hidden-input widget that the select2 javascript (hooked on the
    ``select2`` css class) turns into a single-product AJAX search box.
    """

    # Toggled by subclasses to allow selecting several products.
    is_multiple = False
    css = 'select2 input-xlarge'

    def format_value(self, value):
        """Return the text rendered into the input's value attribute."""
        return six.text_type(value or '')

    def value_from_datadict(self, data, files, name):
        """Extract this widget's value from the submitted form data."""
        value = data.get(name, None)
        if value is None:
            return value
        else:
            return six.text_type(value)

    def render(self, name, value, attrs=None, choices=()):
        """Render a hidden <input> carrying the data-* attributes select2 reads."""
        attrs = self.build_attrs(attrs, **{
            'type': 'hidden',
            'class': self.css,
            'name': name,
            'data-ajax-url': reverse('dashboard:catalogue-product-lookup'),
            'data-multiple': 'multiple' if self.is_multiple else '',
            'value': self.format_value(value),
            'data-required': 'required' if self.is_required else '',
        })
        return mark_safe(u'<input %s>' % flatatt(attrs))
class ProductSelectMultiple(ProductSelect):
    """
    Variant of ProductSelect that lets the user pick several products;
    the selection travels through the form as a comma separated string.
    """

    is_multiple = True
    css = 'select2 input-xxlarge'

    def format_value(self, value):
        """Serialise the selected values as a comma separated string."""
        if value:
            return ','.join(map(six.text_type, filter(bool, value)))
        else:
            return ''

    def value_from_datadict(self, data, files, name):
        """Return the submitted value as a list of non-empty strings."""
        value = data.get(name, None)
        if value is None:
            return []
        else:
            # Bug fix: on Python 3 filter() returns a lazy iterator, but
            # callers expect a real list (len(), repeated iteration).
            return list(filter(bool, value.split(',')))
| Python | 0.000272 |
7e449b0267f47ee08327d9d76976c5e1b197501b | Add missing migration (#9504) | osf/migrations/0219_auto_20201020_1836.py | osf/migrations/0219_auto_20201020_1836.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-10-20 18:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.28 (makemigrations): widens the
    # machine-state / action choice sets with the embargo- and
    # withdrawal-related states.  Do not edit the field definitions by
    # hand -- regenerate instead.

    dependencies = [
        ('osf', '0218_auto_20200929_1850'),
    ]

    operations = [
        migrations.AlterField(
            model_name='draftregistration',
            name='machine_state',
            field=models.CharField(choices=[('initial', 'Initial'), ('pending', 'Pending'), ('accepted', 'Accepted'), ('rejected', 'Rejected'), ('withdrawn', 'Withdrawn'), ('pending_embargo', 'Pending_Embargo'), ('embargo', 'Embargo'), ('pending_embargo_termination', 'Pending_Embargo_Termination'), ('pending_withdraw_request', 'Pending_Withdraw_Request'), ('pending_withdraw', 'Pending_Withdraw')], db_index=True, default='initial', max_length=30),
        ),
        migrations.AlterField(
            model_name='registrationaction',
            name='from_state',
            field=models.CharField(choices=[('initial', 'Initial'), ('pending', 'Pending'), ('accepted', 'Accepted'), ('rejected', 'Rejected'), ('withdrawn', 'Withdrawn'), ('pending_embargo', 'Pending_Embargo'), ('embargo', 'Embargo'), ('pending_embargo_termination', 'Pending_Embargo_Termination'), ('pending_withdraw_request', 'Pending_Withdraw_Request'), ('pending_withdraw', 'Pending_Withdraw')], max_length=31),
        ),
        migrations.AlterField(
            model_name='registrationaction',
            name='to_state',
            field=models.CharField(choices=[('initial', 'Initial'), ('pending', 'Pending'), ('accepted', 'Accepted'), ('rejected', 'Rejected'), ('withdrawn', 'Withdrawn'), ('pending_embargo', 'Pending_Embargo'), ('embargo', 'Embargo'), ('pending_embargo_termination', 'Pending_Embargo_Termination'), ('pending_withdraw_request', 'Pending_Withdraw_Request'), ('pending_withdraw', 'Pending_Withdraw')], max_length=31),
        ),
        migrations.AlterField(
            model_name='registrationaction',
            name='trigger',
            field=models.CharField(choices=[('submit', 'Submit'), ('accept', 'Accept'), ('reject', 'Reject'), ('edit_comment', 'Edit_Comment'), ('embargo', 'Embargo'), ('withdraw', 'Withdraw'), ('request_withdraw', 'Request_Withdraw'), ('withdraw_request_fails', 'Withdraw_Request_Fails'), ('withdraw_request_pass', 'Withdraw_Request_Pass'), ('reject_withdraw', 'Reject_Withdraw'), ('force_withdraw', 'Force_Withdraw'), ('request_embargo', 'Request_Embargo'), ('request_embargo_termination', 'Request_Embargo_Termination'), ('terminate_embargo', 'Terminate_Embargo')], max_length=31),
        ),
    ]
| Python | 0.000002 | |
c137028a98cd762a4e93950fbde085969500999e | Build tagger | installer/build_tag.py | installer/build_tag.py | #!/usr/python
import os
from subprocess import call, check_output

# Ask the helper script for the current build number recorded in the
# version header of the Tasks app.
ver = check_output([ "python", "version.py", "../apps/Tasks/src/version.h",
    "PROGRAM_VERSION_MAJOR,PROGRAM_VERSION_MINOR,PROGRAM_VERSION_PATCH,PROGRAM_VERSION_BUILD",
    "PROGRAM_VERSION_BUILD"])

VERSION = ver.strip()

# Commit the (already bumped) version header and tag this build.
call(["git","add","../apps/Tasks/src/version.h"])
call(["git","commit","-m","Build for version %s" % VERSION])
call(["git","tag","tag-build-v%s" % VERSION])
866b1c634c4fc6dc27ad953ccde6b6dcd11dcc91 | Add mood light script | moodlight.py | moodlight.py | from maya.utils import executeDeferred
import pymel.core as pm
import threading
import time
# Module-level singleton state: the worker thread (if any) and the flag
# its run() loop polls.  Mutated only by start()/stop().
_active_mood_light = None
_running = False
class MoodLightThread(threading.Thread):
    """Background thread that slowly cycles Maya's viewport background
    gradient through the hue wheel."""

    def __init__(self, speed):
        # speed: fraction of the hue wheel traversed per second.
        self.speed = speed
        super(MoodLightThread, self).__init__()

    def run(self):
        # Poll the module-level _running flag so stop() can end the loop.
        while _running:
            time.sleep(0.05)
            color = pm.dt.Color()
            # Wall-clock driven hue in [0, 360): smooth, independent of
            # how long any individual iteration takes.
            hue = time.time() * self.speed % 1 * 360
            # Gradient bottom: fully saturated but dark (V=0.3).
            color.set('HSV', hue, 1, 0.3)
            # executeDeferred: UI calls must run on Maya's main thread.
            executeDeferred(
                pm.mel.displayRGBColor,
                'backgroundBottom',
                color.r,
                color.g,
                color.b
            )
            # Gradient top: light pastel of the same hue (S=0.3, V=1).
            color.set('HSV', hue, 0.3, 1)
            executeDeferred(
                pm.mel.displayRGBColor,
                'backgroundTop',
                color.r,
                color.g,
                color.b
            )
def is_running():
    """Return True when a mood light thread exists and is active."""
    # Plain reads of module globals need no `global` statement.
    if _active_mood_light is None:
        return False
    return _running
def start(speed=0.05):
    """Start a new mood light thread, replacing any running one.

    speed: fraction of the hue wheel traversed per second.
    """
    global _active_mood_light, _running
    # Ensure at most one worker thread exists.
    stop()
    _running = True
    _active_mood_light = MoodLightThread(speed)
    _active_mood_light.start()
def stop():
    """Stop the running mood light thread (no-op when none is active)."""
    global _active_mood_light, _running
    if is_running():
        # Clear the flag first so the run() loop exits, then wait for it.
        _running = False
        _active_mood_light.join()
        _active_mood_light = None
b0d699066799d0309e7af3f8892f56a6feaac778 | Write tests for new functionality; several destinations | new_tests.py | new_tests.py | from numpy import testing
import unittest
import numpy as np
from numpy import pi
from robot_arm import RobotArm
class TestRobotArm(unittest.TestCase):
    """Constructor tests for RobotArm: tuple arguments are accepted,
    numpy arrays are rejected with AssertionError."""

    def setUp(self):
        # Three link lengths, five target points and one initial joint
        # configuration, all as plain tuples (the type RobotArm expects).
        self.lengths = (3, 2, 2,)
        self.destinations = (
            (5, 0,),
            (4, 2,),
            (6, 0.5),
            (4, -2),
            (5, -1),
        )
        self.theta = (pi, pi/2, 0,)

    def test_init_all_arguments(self):
        """Constructing with lengths, destinations and theta succeeds."""
        RobotArm(self.lengths, self.destinations, self.theta)

    def test_init_without_theta(self):
        """theta is optional."""
        RobotArm(self.lengths, self.destinations)

    def test_wrong_lengths_type(self):
        """A numpy array for lengths must be rejected."""
        self.assertRaises(
            AssertionError,
            RobotArm,
            np.array(self.lengths),
            self.destinations,
            self.theta)

    def test_wrong_destinations_type(self):
        """A numpy array for destinations must be rejected."""
        self.assertRaises(
            AssertionError,
            RobotArm,
            self.lengths,
            np.array(self.destinations),
            self.theta)

    def test_wrong_theta_type(self):
        """A numpy array for theta must be rejected."""
        self.assertRaises(
            AssertionError,
            RobotArm,
            self.lengths,
            self.destinations,
            np.array(self.theta))
| Python | 0.000001 | |
50f698c2fdd90bc4b3e60a583c196381fc23e099 | Implement a rudimentary API for LLTK | lltk-restful/base.py | lltk-restful/base.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import lltk
import lltk.generic
import lltk.caching
import lltk.exceptions
from flask import Flask
from flask import jsonify, request, abort
__author__ = 'Markus Beuckelmann'
__author_email__ = 'email@markus-beuckelmann.de'
__version__ = '0.1.0'

# Module switches: DEBUG also turns on LLTK's own debug output,
# CACHING toggles LLTK's result cache.
DEBUG = True
CACHING = True
NAME = 'lltk-restful'
HOST = '127.0.0.1'
PORT = 5000

app = Flask(NAME)

if DEBUG:
    app.debug = True
    lltk.config['debug'] = True
if not CACHING:
    lltk.caching.disable()
@app.route('/lltk/<string:language>/<string:method>/<string:word>', methods = ['GET'])
@app.route('/lltk/<string:language>/<string:method>/<path:extraargs>/<string:word>', methods = ['GET'])
def lltkapi(language, method, word, extraargs = tuple()):
    ''' Returns LLTK's results as a JSON document.

    language  -- language code (first URL segment after /lltk/)
    method    -- name of a callable in lltk.generic (e.g. "translate")
    word      -- the word to look up (last URL segment)
    extraargs -- optional extra positional arguments taken from the path
                 between the method and the word

    Query-string parameters are forwarded as keyword arguments.
    Responds with 404 when the requested method does not exist.
    '''
    data = dict()
    data['language'] = language
    data['method'] = method
    data['word'] = word
    data['result'] = None

    if hasattr(lltk.generic, method) and callable(getattr(lltk.generic, method)):
        function = getattr(lltk.generic, method)
        if not isinstance(extraargs, tuple):
            # extraargs arrives as a slash separated path component.
            extraargs = tuple(extraargs.split('/'))
        kwargs = request.args.to_dict()
        data['result'] = function(language, word, *extraargs, **kwargs)
    else:
        # Bug fix: the previous code called an undefined helper
        # (http_404), raising NameError at runtime; use Flask's abort().
        abort(404)

    return jsonify(data)
if __name__ == '__main__':

    # Flask's built-in development server; not intended for production.
    app.run(
        host = HOST,
        port = PORT
    )
| Python | 0.000034 | |
aef4998354ee5872557392be4bc635e015e5d76d | add serial decoder | serialDecoder.py | serialDecoder.py | #!/usr/bin/python2.7
import signal
import sys
import time
import serial
import io
import getopt
# Polling interval (seconds, kept as a string for the later float() call)
# and the USB-serial device node of the multimeter.
interval = '0.1'
device = '/dev/cu.usbserial'

try:
    # 2400 baud, 8N1, blocking reads: the meter streams packets continuously.
    port=serial.Serial(port=device,
            baudrate=2400,
            bytesize=serial.EIGHTBITS,
            stopbits=serial.STOPBITS_ONE,
            parity=serial.PARITY_NONE,
            timeout=None)

    if not port.isOpen():
        port.open()
except IOError,err:
    print '\nError:' + str(err) + '\n'
    sys.exit(1)

## Close open port gracefully
def closePort(signal, frame):
    # SIGINT handler: flush pending input, close the port and exit.
    # NOTE: the handler parameters shadow the `signal` module here.
    sys.stderr.write('\n\nYou pressed Ctrl+C!\n\n')
    port.flushInput()
    port.close()
    sys.exit(0)

signal.signal(signal.SIGINT, closePort)
# Every packet is 14 bytes long.
def getPacket():
    """Read one 14-byte packet and return its 56 payload bits as a string.

    Each byte contributes only its low nibble (4 bits); the high nibble
    carries framing information and is discarded.
    """
    nibbles = []
    for _ in range(14):
        raw = port.read(1)
        nibbles.append('{0:08b}'.format(ord(raw))[4:])
    return ''.join(nibbles)
def stream_decode(substr):
    """Decode one 56-bit packet (string of '0'/'1') from the multimeter.

    Returns a tuple ``(valueNum, units, flags)``:
      valueNum -- displayed reading scaled by the active SI prefix, or
                  None when the display is not numeric (e.g. "0L").
      units    -- concatenation of the active unit indicators.
      flags    -- comma separated list of the active status indicators.
    """
    # Indicator and 7-segment bits, in transmission order.
    ac = int(substr[0:1])
    dc = int(substr[1:2])
    auto = int(substr[2:3])
    pclink = int(substr[3:4])     # decoded for completeness; not reported
    minus = int(substr[4:5])
    digit1 = substr[5:12]
    dot1 = int(substr[12:13])
    digit2 = substr[13:20]
    dot2 = int(substr[20:21])
    digit3 = substr[21:28]
    dot3 = int(substr[28:29])
    digit4 = substr[29:36]
    micro = int(substr[36:37])
    nano = int(substr[37:38])
    kilo = int(substr[38:39])
    diotst = int(substr[39:40])
    mili = int(substr[40:41])
    percent = int(substr[41:42])
    mega = int(substr[42:43])
    contst = int(substr[43:44])
    cap = int(substr[44:45])
    ohm = int(substr[45:46])
    rel = int(substr[46:47])
    hold = int(substr[47:48])
    amp = int(substr[48:49])
    volts = int(substr[49:50])
    hertz = int(substr[50:51])
    lowbat = int(substr[51:52])
    minm = int(substr[52:53])
    fahrenh = int(substr[53:54])  # decoded for completeness; not reported
    celcius = int(substr[54:55])
    maxm = int(substr[55:56])

    # 7-segment bit pattern -> displayed character.
    digit = {"1111101":"0",
             "0000101":"1",
             "1011011":"2",
             "0011111":"3",
             "0100111":"4",
             "0111110":"5",
             "1111110":"6",
             "0010101":"7",
             "1111111":"8",
             "0111111":"9",
             "0000000":"",
             "1101000":"L"}

    valueStr = ("-" if minus else " ") +\
               digit.get(digit1,"") + ("." if dot1 else "") +\
               digit.get(digit2,"") + ("." if dot2 else "") +\
               digit.get(digit3,"") + ("." if dot3 else "") +\
               digit.get(digit4,"")
    try:
        valueNum = float(valueStr)
    except ValueError:
        # Non-numeric display, e.g. overload ("0L") or blank segments.
        valueNum = None

    flags = ",".join(["AC" if ac else "",
                      "DC" if dc else "",
                      "Auto" if auto else "",
                      "Diode test" if diotst else "",
                      "Conti test" if contst else "",
                      "Capacity" if cap else "",
                      "Rel" if rel else "",
                      "Hold" if hold else "",
                      "Min" if minm else "",
                      "Max" if maxm else "",
                      "LowBat" if lowbat else ""])

    # Apply the SI prefix.  Bug fix: the previous version multiplied by
    # 10e-9, 10e-6, 10e-3, 10e3 and 10e6 -- which in Python are 1e-8,
    # 1e-5, 1e-2, 1e4 and 1e7 -- so every prefixed reading came out ten
    # times too large.
    if valueNum is None:
        pass
    elif nano:
        valueNum *= 1e-9
    elif micro:
        valueNum *= 1e-6
    elif mili:
        valueNum *= 1e-3
    elif kilo:
        valueNum *= 1e3
    elif mega:
        valueNum *= 1e6

    units = ("%" if percent else "") +\
            ("Ohm" if ohm else "") +\
            ("Amp" if amp else "") +\
            ("Volt" if volts else "") +\
            ("Hz" if hertz else "") +\
            ("C" if celcius else "")

    return (valueNum, units, flags)
#while 1:
# substr = getPacket()
# data = stream_decode(substr)
# print data
# time.sleep(float(interval))
# port.flushInput()
| Python | 0.000007 | |
04f19b29c79e1ab624d7ce596730ad9b4fd500fd | add lcdb.helpers.py | lcdb/helpers.py | lcdb/helpers.py | import yaml
from jsonschema import validate, ValidationError
def validate_config(config, schema):
    """Validate the YAML file *config* against the YAML schema *schema*.

    Raises jsonschema.ValidationError with a filename-prefixed message
    when validation fails.
    """
    # Context managers close the files; safe_load refuses to construct
    # arbitrary Python objects from the YAML (plain yaml.load without an
    # explicit Loader is unsafe on untrusted input and deprecated).
    with open(schema) as schema_fh:
        schema_data = yaml.safe_load(schema_fh)
    with open(config) as config_fh:
        cfg = yaml.safe_load(config_fh)

    try:
        validate(cfg, schema_data)
    except ValidationError as e:
        msg = '\nPlease fix %s: %s\n' % (config, e.message)
        raise ValidationError(msg)
| Python | 0.001346 | |
f4944256092b085b1546eaec114e0987da6697bc | add simple cli client | instapaper_cli.py | instapaper_cli.py | #!/opt/local/bin/python2.6
from instapaper import Instapaper
from optparse import OptionParser
from getpass import getpass
def usage():
    """Print legacy usage help (superseded by OptionParser in main())."""
    print "Usage: instapaper.py [-h] username password url"
    print "Options:"
    print "-h Print this help"
def main():
# initialize parser
usage = "usage: %prog -u USER [-t TITLE] url"
parser = OptionParser(usage)
parser.add_option("-u", "--user", action="store", dest="user",metavar="USER",
help="instapaper username")
parser.add_option("-t", "--title", action="store", dest="title",metavar="TITLE",
help="title of the link to add")
(options, args) = parser.parse_args()
if not options.user:
parser.error("No instapaper user given.")
else:
title = ""
if options.title:
title = options.title
pw = getpass()
inst = Instapaper(options.user,pw)
result = inst.addItem(args[0],title)
if (result == -1):
print "Uh-Oh, something went wrong."
if __name__ == "__main__":
main()
| Python | 0.000001 | |
77886d170cba5c2427982992f3ff54f6357e3a07 | add basic inverted index tool | inverted_index.py | inverted_index.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# @author: Jason Wu (bowenwu@sohu-inc.com)
# This is a simple Inverted Index library to pretreatment for PMI compute or similar way
import math
import re
from operator import itemgetter
class InvertedIndex:
    '''
    In-memory inverted index over a collection of documents.

    Maps every term to the list of document numbers it appears in --
    the bookkeeping needed for PMI-style co-occurrence statistics.
    '''

    def __init__(self, stopword_filename = None):
        '''
        Initialize an empty index.

        If a stopword file is specified, reads the stopword list from it,
        in the format of one stopword per line.

        Attributes:
            stopword_filename: file with one stopword in one line
        '''
        self.num_docs = 0
        self.term_doc = {}  # term -> list of document numbers, in add order
        self.stopwords = []
        if stopword_filename:
            # Bug fix: the file handle was previously never closed.
            with open(stopword_filename, "r") as stopword_file:
                self.stopwords = [line.strip() for line in stopword_file]

    def get_tokens(self, _str):
        '''
        Break a string into tokens.
        This implementation does not preserve case.
        Clients may wish to override this behavior with their own tokenization.

        Attributes:
            _str: the string to split
        '''
        return _str.strip().split()

    def add_input_document(self, _input):
        '''
        Add terms in the specified document to the inverted index.

        Attributes:
            _input: the input content
        '''
        # Bug fix: the previous version referenced the undefined name
        # `num_docs` (missing self.), raising NameError on every call.
        for word in set(self.get_tokens(_input)):
            self.term_doc.setdefault(word, []).append(self.num_docs)
        self.num_docs += 1

    def save_corpus_to_file(self, index_filename):
        '''
        Save the inverted index to the specified file.

        Format: first line is the document count, then one line per term:
        term, a tab, and the tab separated document numbers.

        Attributes:
            index_filename: the specified file
        '''
        # Bug fixes: `term_doc` was referenced without self. (NameError)
        # and the int document numbers were joined without str().
        with open(index_filename, "w") as output_file:
            output_file.write(str(self.num_docs) + "\n")
            for term, docs in self.term_doc.items():
                output_file.write(term + "\t" + "\t".join(map(str, docs)) + "\n")

    def load_corpus_from_file(self, index_filename):
        '''
        Load a corpus written by save_corpus_to_file.

        Attributes:
            index_filename: file built by save_corpus_to_file
        '''
        # Bug fix: the first line (the document count) was previously fed
        # through the term parser and num_docs was incremented per line
        # instead of being read from the file.
        self.term_doc = {}
        with open(index_filename) as fp:
            self.num_docs = int(fp.readline().strip())
            for line in fp:
                word, docs = line.rstrip("\n").split("\t", 1)
                self.term_doc[word] = [int(d) for d in docs.split("\t")]

    def get_num_docs(self):
        '''
        Return the total number of documents added.
        '''
        return self.num_docs

    def concurrence(self, w1, w2):
        '''
        Return the number of documents containing both w1 and w2.

        Attributes:
            w1: one word
            w2: another word
        '''
        docs1 = self.term_doc.get(w1)
        docs2 = self.term_doc.get(w2)
        if not docs1 or not docs2:
            return 0
        # Set intersection instead of the previous O(n*m) `in` scan.
        return len(set(docs1) & set(docs2))

    def get_word_appear(self, word):
        '''
        Return the number of documents the word appears in.

        Attributes:
            word: the check word
        '''
        return len(self.term_doc.get(word, ()))
| Python | 0.000001 | |
4d740138dc7101e2816837c070d3051835977d75 | Add lc0621_task_scheduler.py | lc0621_task_scheduler.py | lc0621_task_scheduler.py | """Leetcode 621. Task Scheduler
Medium
URL: https://leetcode.com/problems/task-scheduler/
Given a char array representing tasks CPU need to do. It contains capital letters
A to Z where different letters represent differenttasks. Tasks could be done
without original order. Each task could be done in one interval. For each
interval, CPU could finish one task or just be idle.
However, there is a non-negative cooling interval n that means between two same
tasks, there must be at least n intervals that CPU are doing different tasks or
just be idle.
You need to return the least number of intervals the CPU will take to finish all
the given tasks.
Example:
Input: tasks = ["A","A","A","B","B","B"], n = 2
Output: 8
Explanation: A -> B -> idle -> A -> B -> idle -> A -> B.
Note:
- The number of tasks is in the range [1, 10000].
- The integer n is in the range [0, 100].
"""
class Solution(object):
    def leastInterval(self, tasks, n):
        """
        :type tasks: List[str]
        :type n: int
        :rtype: int

        Greedy counting solution (O(len(tasks)) time, O(1) extra space
        beyond the 26-letter counter): schedule the most frequent task
        every (n + 1) slots; the remaining tasks fill the gaps.  With
        max_count = highest task frequency and num_max = number of tasks
        sharing it, the answer is
            max(len(tasks), (max_count - 1) * (n + 1) + num_max)
        -- len(tasks) wins when the gaps can be filled without idling.
        """
        if not tasks:
            return 0
        from collections import Counter
        freqs = Counter(tasks)
        max_count = max(freqs.values())
        num_max = sum(1 for c in freqs.values() if c == max_count)
        return max(len(tasks), (max_count - 1) * (n + 1) + num_max)
def main():
    """Demonstrate leastInterval() on the example from the problem statement."""
    tasks = ["A", "A", "A", "B", "B", "B"]
    print(Solution().leastInterval(tasks, 2))  # expected: 8
if __name__ == '__main__':
main()
| Python | 0.000004 | |
af8f7a09c6cf8a96b716d016fc3a983340760869 | Create problem10.py | python/problem10.py | python/problem10.py | import primes
def problem10(limit):
    """Project Euler 10: return the sum of all primes below *limit*."""
    # Bug fix: itertools was referenced but never imported at module
    # level; import it locally so the function is self-contained.
    import itertools
    ps = itertools.takewhile(lambda x: x < limit, primes.Eppstein_Sieve())
    # ps = primes.Eratosthenes(limit) # memory error
    return sum(ps)
| Python | 0.000029 | |
5b276622f570adac64eda9932c7da47bf4bcd25c | Add PPM sample | ppm_practice.py | ppm_practice.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class PpmImage(object):
    """A plain-text (P3) PPM image."""

    def __init__(self, name, width, height, image, depth=8):
        """
        :param name: image name, written as a comment into the header
        :param width: width in pixels
        :param height: height in pixels
        :param image: pixel rows; each row is a flat list of 3 * width
                      integer samples (R, G, B, R, G, B, ...)
        :param depth: bits per colour channel (max sample = 2**depth - 1)
        """
        self.name = name
        self.width = width
        self.height = height
        self.image = image
        self.depth = depth

    def dump(self, fp):
        """Write the image to file object *fp* in ASCII PPM (P3) format.

        Raises IndexError when the pixel data does not match the
        declared width/height.
        """
        fp.write('P3\n')
        fp.write('# ' + self.name + '\n')
        fp.write('{0:d} {1:d}\n'.format(self.width, self.height))
        fp.write('{0:d}\n'.format(2 ** self.depth - 1))

        # Fail with context instead of a bare IndexError().
        if len(self.image) != self.height:
            raise IndexError('expected {0:d} rows, got {1:d}'
                             .format(self.height, len(self.image)))

        for row in self.image:
            if len(row) != 3 * self.width:
                raise IndexError('expected {0:d} samples per row, got {1:d}'
                                 .format(3 * self.width, len(row)))
            # One "R G B" triplet per line keeps lines well within the
            # 70-character limit recommended by the PPM specification.
            for x in range(0, self.width * 3, 3):
                fp.write('{0:3d} {1:3d} {2:3d}\n'.format(*row[x:x+3]))
if __name__ == '__main__':
    # Build a simple 64x64 diagonal-gradient test image.
    name = "test.ppm"
    depth = 8
    width = height = 64
    data = [[(i + j) % 2 ** depth for i in range(3 * width)]
            for j in range(height)]
    image = PpmImage(name, width, height, data, depth=depth)

    # Save it to disk.
    with open("test.ppm", 'w') as f:
        image.dump(f)
4f404a71cb7ee912bca8184fe94c97d6cfba1186 | Add script to rotate a solid angle in the xz plane | preprocessing_tools/solid_rotation_y.py | preprocessing_tools/solid_rotation_y.py | '''
Rotates the protein by a solid angle on the plane xz
'''
import numpy
import os
from argparse import ArgumentParser
from move_prot_helper import (read_vertex, read_pqr, rotate_y,
modify_pqr)
def read_inputs():
    """
    Parse command-line arguments to run solid_rotation_y.
    User should provide:
     -im / --inMesh   : str, mesh file you want to rotate.
     -ip / --inpqr    : str, pqr of the object you want to rotate.
     -angy / --angle_y: float [degrees], rotation angle in the xz plane.
     -n / --name      : str, suffix appended to the output file names.
    """
    parser = ArgumentParser(description='Manage solid_rotation_y command line arguments')

    parser.add_argument('-im', '--inMesh', dest='im', type=str, default=None,
                        help="mesh file you want to rotate")

    parser.add_argument('-ip', '--inpqr', dest='ip', type=str, default=None,
                        help="pqr of the object you want to rotate")

    parser.add_argument('-angy', '--angle_y', dest='angy', type=float, default=None,
                        help="rotation angle in the plane xz")

    parser.add_argument('-n', '--name', dest='name', type=str, default='',
                        help="output file name")

    return parser.parse_args()
args = read_inputs()

inMesh = args.im
inpqr = args.ip
# CLI angle arrives in degrees; rotate_y works in radians.
angle_y = float(args.angy)*numpy.pi/180.
name = args.name

# Output files are the input names with the user-supplied suffix.
outMesh = inMesh + name
outpqr = inpqr + name

#Read mesh and pqr
#vert = read_vertex(inMesh+'.vert', float)
vert = numpy.loadtxt(inMesh+'.vert', dtype=float)
xq, q, Nq = read_pqr(inpqr+'.pqr', float)

# Rotate both the charge positions and the mesh vertices together.
xq_new = rotate_y(xq, angle_y)
vert_new = rotate_y(vert, angle_y)

# Report the resulting geometry so the user can sanity-check it.
ctr = numpy.average(vert_new, axis=0)
r_min_last = numpy.min(numpy.linalg.norm(vert_new, axis=1))
idx_rmin_last = numpy.argmin(numpy.linalg.norm(vert_new, axis=1))

print ('Desired configuration:')
print ('\tProtein is centered, {}'.format(ctr))
print ('\tProtein r minimum is {}, located at {}'.format(r_min_last,
                                            vert_new[idx_rmin_last, :]))

#### Save to file
numpy.savetxt(outMesh+'.vert', vert_new)
# NOTE: plain shell `cp` -- the .face connectivity is unchanged by a
# rotation, only the vertex coordinates move.
cmd = 'cp '+inMesh+'.face '+outMesh+'.face'
os.system(cmd)
modify_pqr(inpqr+'.pqr', outpqr+'.pqr', xq_new)

print ('\nWritten to '+outMesh+'.vert(.face) and '+outpqr+'.pqr')
| Python | 0 | |
9e32b1211c0096488142192f5e280fe6258f368e | Add basic SG generator with limited features | misc/sg_generator.py | misc/sg_generator.py | #!/usr/bin/python -u
#
# Copyright (c) 2016 Balazs Nemeth
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Generates requests that which can be used as standard test SG-s to cover
most/all functionalities of ESCAPE.
"""
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../escape/escape/nffg_lib/")))
from nffg import NFFG, NFFGToolBox
import random
def gen_8loop_tests (saps, vnfs, seed, add_req=True):
  """
  Generates simple request NFFGs in all combinations of sap1-->vnf1-->...-->
  vnfn-->sap1. With a loop requirement if add_req is set.

  :param saps: list of sap ID-s from the network
  :type saps: list
  :param vnfs: list of VNF **Types** which should be instantiated
  :type vnfs: list
  :param seed: seed for random generator
  :type seed: int
  :param add_req: If set EdgeReq objects are added
  :type add_req: bool
  :return: a generator over :any:`NFFG`
  :rtype: generator
  """
  random.seed(seed)
  for sap in saps:
    nffg = NFFG()
    sapo = nffg.add_sap(id=sap, name=sap+"_name")
    sapp = sapo.add_port()
    # randint(len, len) always yields len(vnfs), so each sample() is a
    # full shuffled copy of vnfs; the randint call still consumes RNG
    # state, so it cannot be removed without changing seeded outputs.
    vnfs1 = random.sample(vnfs, random.randint(len(vnfs),len(vnfs)))
    vnfs2 = random.sample(vnfs, random.randint(len(vnfs),len(vnfs)))
    # The shared "middle" NF joins the two chains into a figure-8 shape.
    nfmiddle = nffg.add_nf(id="nf0", name="nf_middle", func_type=random.choice(vnfs1),
                           cpu=1, mem=1, storage=1)
    vnfs1.remove(nfmiddle.functional_type)
    try:
      vnfs2.remove(nfmiddle.functional_type)
    except ValueError:
      # The chosen middle type may not be present in the second shuffle.
      pass
    i = 1
    once = True
    for vnf_list in (vnfs1, vnfs2):
      nf0 = nfmiddle
      for vnf in vnf_list:
        nf1 = nffg.add_nf(id="nf"+str(i), name="nf"+str(i)+"_"+vnf, func_type=vnf,
                          cpu=1, mem=1, storage=1)
        nffg.add_sglink(src_port=nf0.add_port(), dst_port=nf1.add_port(),
                        flowclass="HTTP", id=i)
        nf0 = nf1
        i+=1
      if once:
        # Close the first chain back onto the middle NF (first loop of the 8).
        nffg.add_sglink(src_port=nf0.add_port(), dst_port=nfmiddle.add_port(),
                        flowclass="HTTP", id=i)
        once = False
        i+=1
    # Close the second loop through the SAP.
    nffg.add_sglink(src_port=nf1.add_port(), dst_port=sapp,
                    flowclass="HTTP", id=i)
    nffg.add_sglink(src_port=sapp, dst_port=nfmiddle.add_port(),
                    flowclass="HTTP", id=i+1)
    yield nffg
def gen_simple_oneloop_tests (saps, vnfs):
  """
  Generates simple request NFFGs in all combinations of sap1-->vnf1-->sap1.
  With a loop requirement

  :param saps: list of sap ID-s from the network
  :type saps: list
  :param vnfs: list of VNF **Types** which should be instantiated
  :type vnfs: list
  :return: a generator over :any:`NFFG`
  :rtype: generator
  """
  for sap in saps:
    for vnf in vnfs:
      # One request per (SAP, VNF type) pair: SAP -> VNF -> same SAP.
      nffg = NFFG()
      sapo = nffg.add_sap(id=sap, name=sap+"_name")
      nfo = nffg.add_nf(id="nf", name="nf_"+vnf, func_type=vnf,
                        cpu=1, mem=1, storage=1)
      sapp = sapo.add_port()
      nffg.add_sglink(src_port=sapp, dst_port=nfo.add_port(),
                      flowclass="HTTP", id=1)
      nffg.add_sglink(src_port=nfo.add_port(), dst_port=sapp,
                      flowclass="HTTP", id=2)
      # End-to-end requirement over the whole loop: 50 delay, 1 bandwidth.
      nffg.add_req(src_port=sapp, dst_port=sapp, delay=50, bandwidth=1,
                   sg_path=[1,2])
      yield nffg
if __name__ == '__main__':
  # Generate the figure-8 requests for one SAP, then reverse every SG
  # hop: add a mirrored link for each hop (ids from 600 up) and delete
  # the originals, effectively flipping the direction of the service graph.
  for nffg in gen_8loop_tests(saps=['SAP11'],
                              vnfs=['camtest:1.0', 'controller2:1.0',
       'controller1:1.0', 'javacontroller:1.0', 'mover:1.0', 'dal:1.0'],
                              seed=4):
    print nffg.network.edges(keys=True)
    i = 600
    for sg in nffg.sg_hops:
      nffg.add_sglink(src_port=sg.dst, dst_port=sg.src, id=i, delay=sg.delay,
                      bandwidth=sg.bandwidth)
      i+=1
    for sgprime in [sg for sg in nffg.sg_hops if sg.id < 600]:
      nffg.del_edge(src=sgprime.src, dst=sgprime.dst, id=sgprime.id)
    print nffg.dump()
  # Dead code kept below for reference (exercises the one-loop generator).
  """
  for nffg in gen_simple_oneloop_tests (saps=['SAP1', 'SAP2', 'SAP3', 'SAP54'],
     vnfs=['headerCompressor', 'headerDecompressor', 'simpleForwarder',
   'splitter', 'nat', 'firewal', 'dpi', 'webserver',
   'balance_server', 'bridge']):
    print nffg.network.node
  """
| Python | 0 | |
468302f6552be4c61e74aa4147d83465287aa2e8 | Revert "Moved "linked_clone.py" script into samples directory" | linked_clone.py | linked_clone.py | #!/usr/bin/env python
"""
Written by Reuben ur Rahman
Github: https://github.com/rreubenur
Email: reuben.13@gmail.com
Linked clone example
"""
import atexit
import requests.packages.urllib3 as urllib3
import ssl
from pyVmomi import vim
from pyVim.connect import SmartConnect, Disconnect
from tools import cli
from tools import tasks
def get_args():
    """Build the command line parser and return the parsed arguments,
    prompting for the password when it was not supplied."""
    parser = cli.build_arg_parser()

    parser.add_argument('-v', '--vm_name',
                        required=True,
                        action='store',
                        help='Name of the new VM')

    parser.add_argument('--template_name',
                        required=True,
                        action='store',
                        help='Name of the template/VM you are cloning from')

    parser.add_argument('--datacenter_name',
                        required=False,
                        action='store',
                        default=None,
                        help='Name of the Datacenter you wish to use.')

    parser.add_argument('--cluster_name',
                        required=False,
                        action='store',
                        default=None,
                        help='Name of the cluster you wish to use')

    parser.add_argument('--host_name',
                        required=False,
                        action='store',
                        default=None,
                        # Bug fix: the help text was a copy-paste of the
                        # cluster option's description.
                        help='Name of the host you wish to use')

    args = parser.parse_args()

    cli.prompt_for_password(args)
    return args
def get_obj(content, vimtype, name, folder=None):
    """Find the first managed object of *vimtype* named *name*.

    Searches recursively under *folder* (the root folder when omitted)
    and returns None when nothing matches.
    """
    search_root = folder or content.rootFolder
    view = content.viewManager.CreateContainerView(search_root, vimtype, True)
    for candidate in view.view:
        if candidate.name == name:
            return candidate
    return None
def _clone_vm(si, template, vm_name, vm_folder, location):
    """Clone *template* into *vm_folder* as *vm_name* and wait for the task.

    The CloneSpec points at the template's first snapshot; together with
    the child-disk relocate spec this produces a linked clone.
    """
    clone_spec = vim.vm.CloneSpec(
        powerOn=True, template=False, location=location,
        snapshot=template.snapshot.rootSnapshotList[0].snapshot)
    task = template.Clone(name=vm_name, folder=vm_folder, spec=clone_spec)
    tasks.wait_for_tasks(si, [task])
    print "Successfully cloned and created the VM '{}'".format(vm_name)
def _get_relocation_spec(host, resource_pool):
    """Build the RelocateSpec used for the clone.

    ``createNewChildDiskBacking`` makes the clone's disks delta (child)
    disks of the template's, i.e. a linked clone.
    """
    return vim.vm.RelocateSpec(
        diskMoveType='createNewChildDiskBacking',
        host=host,
        pool=resource_pool)
def _take_template_snapshot(si, vm):
    """Ensure *vm* has at least one root snapshot.

    A snapshot is required before a linked clone can be created; does
    nothing if one already exists.
    """
    if vm.rootSnapshot:
        return
    snap_task = vm.CreateSnapshot_Task(name='test_snapshot',
                                       memory=False,
                                       quiesce=False)
    tasks.wait_for_tasks(si, [snap_task])
    # Single-argument print(...) emits identical output under Python 2.
    print("Successfully taken snapshot of '{}'".format(vm.name))
def main():
    """Connect to vCenter, locate the template, and create a linked clone.

    Flow: parse args -> connect (TLS verification disabled) -> resolve
    datacenter/cluster/host -> snapshot the template if needed -> clone.
    Raises a plain Exception when a named inventory object is not found.
    """
    args = get_args()
    # Suppress urllib3's insecure-request warnings (we disable cert checks).
    urllib3.disable_warnings()
    si = None
    context = None
    if hasattr(ssl, 'SSLContext'):
        # CERT_NONE disables certificate verification entirely — fine for a
        # sample, not for production.
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_NONE
    if context:
        # Python >= 2.7.9
        si = SmartConnect(host=args.host,
                          port=int(args.port),
                          user=args.user,
                          pwd=args.password,
                          sslContext=context)
    else:
        # Python >= 2.7.7
        si = SmartConnect(host=args.host,
                          port=int(args.port),
                          user=args.user,
                          pwd=args.password)
    # Always disconnect the session when the interpreter exits.
    atexit.register(Disconnect, si)
    print "Connected to vCenter Server"
    content = si.RetrieveContent()
    datacenter = get_obj(content, [vim.Datacenter], args.datacenter_name)
    if not datacenter:
        raise Exception("Couldn't find the Datacenter with the provided name "
                        "'{}'".format(args.datacenter_name))
    # Search for the cluster only under this datacenter's host folder.
    cluster = get_obj(content, [vim.ClusterComputeResource], args.cluster_name,
                      datacenter.hostFolder)
    if not cluster:
        raise Exception("Couldn't find the Cluster with the provided name "
                        "'{}'".format(args.cluster_name))
    # Pick the named host out of the cluster; host_obj stays None when no
    # host matches (the relocation spec then carries host=None).
    host_obj = None
    for host in cluster.host:
        if host.name == args.host_name:
            host_obj = host
            break
    vm_folder = datacenter.vmFolder
    template = get_obj(content, [vim.VirtualMachine], args.template_name,
                       vm_folder)
    if not template:
        raise Exception("Couldn't find the template with the provided name "
                        "'{}'".format(args.template_name))
    location = _get_relocation_spec(host_obj, cluster.resourcePool)
    # Linked clones require a snapshot on the source VM.
    _take_template_snapshot(si, template)
    _clone_vm(si, template, args.vm_name, vm_folder, location)
if __name__ == "__main__":
    # Run the clone workflow only when executed as a script.
    main()
| Python | 0 | |
daf23cbb6d6015a2819de5d089a35903cbce9441 | Create katakan.py | list/katakan.py | list/katakan.py | """
4
2 belas
seratus 4 puluh 0
9 ribu seratus 2 puluh 1
2 puluh 1 ribu 3 puluh 0
9 ratus 5 ribu 0
8 puluh 2 juta 8 ratus 8 belas ribu seratus 8 puluh 8
3 ratus 1 juta 4 puluh 8 ribu 5 ratus 8 puluh 8
"""
def kata(n):
    """Spell out a non-negative integer in Indonesian (recursively).

    Examples: ``kata(12) == "2 belas"``,
    ``kata(9121) == "9 ribu seratus 2 puluh 1"`` (see module docstring).
    Numbers >= 1 000 000 000 yield ``""``.

    Fixes over the original:
    - ``kata(11)`` no longer raises IndexError (``range(11)`` only had
      indices 0..10, so the ``n < 12`` branch crashed for 11).
    - Integer division uses ``//`` so the function also works under
      Python 3 (identical to ``/`` for ints under Python 2).
    """
    if n < 12:
        return str(n)
    elif n < 20:
        return str(n - 10) + " belas"
    elif n < 100:
        return kata(n // 10) + " puluh " + kata(n % 10)
    elif n < 200:
        return "seratus " + kata(n - 100)
    elif n < 1000:
        return kata(n // 100) + " ratus " + kata(n % 100)
    elif n < 2000:
        return "seribu " + kata(n - 1000)
    elif n < 1000000:
        return kata(n // 1000) + " ribu " + kata(n % 1000)
    elif n < 1000000000:
        return kata(n // 1000000) + " juta " + kata(n % 1000000)
    return ""
# Demo: spell out each sample number; the expected lines are listed in
# the module docstring above.  Single-argument print(...) prints the
# same text under Python 2.
for sample in (4, 12, 140, 9121, 21030, 905000, 82818188, 301048588):
    print(kata(sample))
| Python | 0.000004 | |
1555164ff275436de580a33735a2d8c6e6893b42 | Create lab4.py | laboratorios/lab4.py | laboratorios/lab4.py | #lab 4
#josue dde leon
# Read three students' names and five quiz grades each, echo the grades
# and average to the console, and save a per-student report to a file
# named after the student.
for _ in range(1, 4):
    nombre = input("\n\nintroduce nombre: ")
    n1 = input("Introduce nota 1: ")
    n2 = input("Introduce nota 2: ")
    n3 = input("Introduce nota 3: ")
    n4 = input("Introduce nota 4: ")
    n5 = input("Introduce nota 5: ")
    # Simple average of the five grades.
    prom = (float(n1) + float(n2) + float(n3) + float(n4) + float(n5)) / 5
    print("\nNombre: " + str(nombre))
    print("\nQuiz 1: " + str(n1))
    print("Quiz 2: " + str(n2))
    print("Quiz 3: " + str(n3))
    print("Quiz 4: " + str(n4))
    print("Quiz 5: " + str(n5))
    print("\n\nEl promedio de " + str(nombre) + " es " + str(prom))
    # 'with' guarantees the file is closed even if a write fails
    # (the original relied on an explicit close() call).
    with open(nombre, 'w') as archivo:
        archivo.write("Nombre: " + str(nombre))
        archivo.write("\nquiz 1: " + n1)
        archivo.write("\nquiz 2: " + n2)
        archivo.write("\nquiz 3: " + n3)
        archivo.write("\nquiz 4: " + n4)
        archivo.write("\nquiz 5: " + n5)
        archivo.write("\nEl promedio de " + str(nombre) + " es " + str(prom))
| Python | 0.000001 | |
7b06edf37a630d4582fc84832cd1d40b790e4aa3 | Add server | pygls/server.py | pygls/server.py | import asyncio
import logging
from .protocol import LanguageServerProtocol
logger = logging.getLogger(__name__)
class Server:
    """Base server that drives an :class:`asyncio.Protocol` on an event loop.

    The protocol instance is created immediately and handed a reference to
    this server; the listening socket is created lazily by ``start_tcp``.
    """

    def __init__(self, protocol_cls):
        assert issubclass(protocol_cls, asyncio.Protocol)
        self.loop = asyncio.get_event_loop()
        self.lsp = protocol_cls(self)
        self.server = None

    def shutdown(self):
        """Stop accepting new connections."""
        self.server.close()
        # TODO: Gracefully shutdown event loops

    def start_tcp(self, host, port):
        """Bind to *host*:*port* and serve until the loop is stopped."""
        server_coro = self.loop.create_server(self.lsp, host, port)
        self.server = self.loop.run_until_complete(server_coro)
        self.loop.run_forever()
class LanguageServer(Server):
    """Server wired to the Language Server Protocol implementation.

    Thin facade: registration calls are forwarded to the protocol's
    feature manager (``self.lsp.fm``).
    """

    def __init__(self):
        super().__init__(LanguageServerProtocol)

    def command(self, command_name):
        """Register a handler for the named workspace command.

        Args:
            command_name (str): name of the command to register.
        """
        return self.lsp.fm.command(command_name)

    def feature(self, *feature_names, **options):
        """Register a handler for one or more LSP features.

        Args:
            *feature_names: feature identifiers (see the lsp module).
            **options: feature options, e.g. ``triggerCharacters=['.']``.
        """
        return self.lsp.fm.feature(*feature_names, **options)

    def thread(self):
        # Delegates to the protocol's thread() decorator; semantics are
        # defined by LanguageServerProtocol (not visible here).
        return self.lsp.thread()
| Python | 0.000001 | |
357ce31d1f28fbc5d12a23dfd3bb2aa40a4e27a3 | Add serialdumpbytexor.py | serialdumpbytexor.py | serialdumpbytexor.py | #!/usr/bin/env python
import sys, serial
if __name__ == '__main__':
ser = serial.Serial('/dev/cu.usbserial-A8004ISG', 115200, timeout=10, xonxoff=0, rtscts=0)
# ser.open()
bb = bytearray(512)
while 1:
ba = bytearray(ser.read(1024))
for i in range(512):
j = i * 2
bb[i] = ba[j] ^ ba[j+1]
sys.stdout.write(bb)
sys.stdout.flush()
| Python | 0.000128 | |
ca99e80e04a1d7fb3ff3698f23cdc19c8ec16113 | add refresh test | refresh_test.py | refresh_test.py | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2017 ScyllaDB
import time
from avocado import main
from sdcm.tester import ClusterTester
from sdcm.nemesis import RefreshMonkey
from sdcm.nemesis import RefreshBigMonkey
class RefreshTest(ClusterTester):
    """
    Nodetool refresh after uploading lot of data to a cluster with running load in the background.
    :avocado: enable
    """

    def _run_refresh(self, nemesis):
        """Run one refresh scenario with the given nemesis class.

        Attaches *nemesis* to the DB cluster, starts the write workload,
        lets it warm up, triggers the nemesis, then collects the stress
        results.  (Extracted because both test methods were identical
        except for the nemesis class.)
        """
        self.db_cluster.add_nemesis(nemesis=nemesis,
                                    loaders=self.loaders,
                                    monitoring_set=self.monitors)
        # run a write workload
        stress_queue = self.run_stress_thread(stress_cmd=self.params.get('stress_cmd'),
                                              stress_num=2,
                                              keyspace_num=1)
        # Give the load a head start before the disruption begins.
        time.sleep(30)
        self.db_cluster.start_nemesis()
        self.db_cluster.stop_nemesis(timeout=None)
        self.get_stress_results(queue=stress_queue, stress_num=2, keyspace_num=1)

    def test_refresh_small_node(self):
        self._run_refresh(RefreshMonkey)

    def test_refresh_big_node(self):
        self._run_refresh(RefreshBigMonkey)
if __name__ == '__main__':
    # Hand off to avocado's runner entry point (imported above).
    main()
| Python | 0 | |
b440872f71d37cc5bf110eb0c7c13a4a2dcb7f6c | create utils package, field_template_read update var name to template render | opps/fields/utils.py | opps/fields/utils.py | # -*- coding: utf-8 -*-
def field_template_read(obj):
    """Return a dict like *obj* with "-" in every key replaced by "_".

    Django templates cannot resolve variable names containing "-", so
    keys are normalised before the mapping is handed to a template.
    """
    return {key.replace("-", "_"): obj[key] for key in obj}
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.