index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
5,000 | 7130a382784955780a3f258c81ce05c61915af56 | import numpy as np
def get_mask(mask):
    """Collapse an RGB seam-carving mask into a signed weight map.

    The red channel marks pixels to remove (-1) and the green channel
    marks pixels to keep (+1); everywhere else the map is 0.  Each
    channel is normalised by its maximum so any positive pixel value
    counts as "marked".
    """
    remove = mask[:, :, 0]
    keep = mask[:, :, 1]
    # `or 1` guards against division by zero on an all-zero channel.
    remove_flags = remove // (remove.max() or 1)
    keep_flags = keep // (keep.max() or 1)
    return keep_flags - remove_flags
def calculate_brightness(image):
    """Return the per-pixel luma of an RGB image (BT.601 weights)."""
    luma_weights = np.array([0.299, 0.587, 0.114])
    # Broadcast the weights over the colour axis, then collapse it.
    weighted = image * luma_weights
    return weighted.sum(axis=2)
def calculate_energy(brightness):
    """Gradient-magnitude energy map of a brightness matrix.

    Interior pixels use (undivided) central differences; border rows
    and columns fall back to one-sided differences.
    """
    dx = np.empty_like(brightness)
    dx[:, 0] = brightness[:, 1] - brightness[:, 0]
    dx[:, 1:-1] = brightness[:, 2:] - brightness[:, :-2]
    dx[:, -1] = brightness[:, -1] - brightness[:, -2]

    dy = np.empty_like(brightness)
    dy[0, :] = brightness[1, :] - brightness[0, :]
    dy[1:-1, :] = brightness[2:, :] - brightness[:-2, :]
    dy[-1, :] = brightness[-1, :] - brightness[-2, :]

    return np.sqrt(dx ** 2 + dy ** 2)
def calculate_minimal_seam_matrix(pre_energy, mask=None):
    """Dynamic-programming table of cumulative minimal seam energies.

    Row i holds, per column, the cheapest total energy of any vertical
    seam ending at that pixel.  An optional mask is added to the energy
    first so masked pixels can be strongly favoured or penalised.
    The input `pre_energy` is left unmodified.
    """
    table = pre_energy + mask if mask is not None else pre_energy.copy()
    for row_idx in range(1, table.shape[0]):
        prev = table[row_idx - 1]
        # Cheapest reachable predecessor: up-left, straight up, up-right
        # (border columns duplicate the edge value instead of wrapping).
        shifted_left = np.empty_like(prev)
        shifted_left[0] = prev[0]
        shifted_left[1:] = prev[:-1]
        shifted_right = np.empty_like(prev)
        shifted_right[-1] = prev[-1]
        shifted_right[:-1] = prev[1:]
        table[row_idx] += np.minimum(np.minimum(shifted_left, shifted_right), prev)
    return table
def get_minimal_seam(min_seam):
    """Backtrack the cheapest vertical seam through a DP table.

    Returns one column index per row (top to bottom); each step moves
    at most one column relative to the row below.
    """
    height, width = min_seam.shape
    seam = np.zeros(height, dtype=np.int32)
    seam[-1] = np.argmin(min_seam[-1])
    for row in range(height - 2, -1, -1):
        below = seam[row + 1]
        # Clamp the three-candidate window at the matrix borders.
        lo = max(below - 1, 0)
        hi = min(below + 2, width)
        seam[row] = lo + np.argmin(min_seam[row, lo:hi])
    return seam
def cut(image, mask):
    """Remove one minimal-energy vertical seam from `image`.

    Returns (narrowed image, narrowed mask or None, binary map of the
    removed seam).  The mask, when given, is scaled so large that it
    dominates the energy and thereby forces/forbids seam placement.
    """
    height, width = image.shape[:2]
    energy = calculate_energy(calculate_brightness(image))
    # Weight strong enough to outrank any accumulated pixel energy.
    dominance = height * width * 256
    weighted_mask = mask * dominance if mask is not None else None
    seam = get_minimal_seam(calculate_minimal_seam_matrix(energy, weighted_mask))

    narrowed = np.empty((height, width - 1, 3), np.uint8)
    narrowed_mask = np.empty((height, width - 1), np.int32) if mask is not None else None
    seam_map = np.zeros(image.shape[:2], dtype=np.uint8)
    for row_idx, col in enumerate(seam):
        narrowed[row_idx] = np.delete(image[row_idx], col, axis=0)
        if mask is not None:
            narrowed_mask[row_idx] = np.delete(mask[row_idx], col, axis=0)
        seam_map[row_idx][col] = 1
    return narrowed, narrowed_mask, seam_map
def extend(image, mask):
    """Insert one minimal-energy vertical seam into `image` (widen by 1).

    Each seam pixel is duplicated by averaging it with its right
    neighbour; at the right border the last pixel is repeated instead.
    Returns (widened image, widened mask or None, binary seam map).
    """
    brightness = calculate_brightness(image)
    energy = calculate_energy(brightness)
    # Mask weight large enough to dominate any accumulated energy.
    mult = image.shape[0] * image.shape[1] * 256
    min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not None else None)
    seam = get_minimal_seam(min_seam)
    copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)
    copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32) if mask is not None else None
    seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)
    for row, i in enumerate(seam):
        if i >= image.shape[1] - 1:
            # Seam hugs the right border: repeat the last pixel.
            copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)
            if mask is not None:
                copy_mask[row] = np.append(mask[row], 0)
                # Mark both copies so the same seam is not reused next pass.
                copy_mask[row][-2] = 1
                copy_mask[row][-1] = 1
        else:
            # Halve each operand before adding to stay in uint8 range.
            copy[row] = np.insert(image[row], i+1, image[row][i] // 2 + image[row][i+1] // 2, axis=0)
            if mask is not None:
                copy_mask[row] = np.insert(mask[row], i+1, 0, axis=0)
                copy_mask[row][i] = 1
                copy_mask[row][i+1] = 1
        seam_mask[row][i] = 1
    return copy, copy_mask, seam_mask
def seam_carve(image, mode, mask):
    """Dispatch a single seam-carving step.

    `mode` is one of 'horizontal shrink', 'vertical shrink',
    'horizontal expand'; any other value behaves as 'vertical expand'.
    Vertical modes are served by transposing the inputs, applying the
    horizontal operation, and transposing the results back.
    """
    def on_transposed(operation):
        # Run `operation` on transposed inputs, un-transpose its outputs.
        img_t, mask_t, seam_t = operation(
            np.transpose(image, (1, 0, 2)), mask.T if mask is not None else None
        )
        return (np.transpose(img_t, (1, 0, 2)),
                mask_t.T if mask is not None else None,
                seam_t.T)

    if mode == 'horizontal shrink':
        return cut(image, mask)
    if mode == 'vertical shrink':
        return on_transposed(cut)
    if mode == 'horizontal expand':
        return extend(image, mask)
    return on_transposed(extend)
|
5,001 | e204cbbf36ac180eba0e95916345088c77bca7c0 | #!/usr/bin/python
import wx
class test(wx.Frame):
    """Minimal 500x500 frame used to smoke-test the wx setup."""
    def __init__(self,parent,id):
        # NOTE(review): `id` shadows the builtin; kept as-is for
        # interface compatibility with existing callers.
        wx.Frame.__init__(self,parent,id,"TestFrame",size=(500,500))
if __name__ == '__main__':
    # wx.PySimpleApp is long-deprecated (removed in wxPython Phoenix);
    # wx.App(False) is the equivalent "no stdout redirection" app object.
    app = wx.App(False)
    frame = test(parent=None, id=-1)
    # Bug fix: wxPython's methods are Show() and MainLoop(); the original
    # lowercase show()/mainloop() attributes do not exist and raised
    # AttributeError at runtime.
    frame.Show()
    app.MainLoop()
|
5,002 | 2bc3b0df720788e43da3d9c28adb22b3b1be8c58 | import logging
from django.contrib.auth.models import User
import json
from django.http import HttpResponse
from enumfields.fields import EnumFieldMixin
from Api.models import Status
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def check_cookie(request):
    """Report a user's cookie status as JSON.

    Expects `user_id` in the query string.  Responds with
    {"status": true, "cookie_status": "<digit>"} where the digit is
    keyed by the Status enum member; on any lookup failure only
    {"status": false} is returned.
    """
    # Wire codes for each cookie state (enum members hash by identity,
    # so a dict lookup matches the original `is` comparisons).
    status_codes = {
        Status.DEACTIVATE: "0",
        Status.ACTIVATE: "1",
        Status.EMPTY: "2",
        Status.WARNING: "3",
        Status.ERROR: "4",
    }
    result = {
        "status": True
    }
    try:
        user = User.objects.get(pk=request.GET.get('user_id'))
        code = status_codes.get(user.profile.cookie_status)
        if code is not None:
            result['cookie_status'] = code
    except Exception as e:
        logger.info(e)
        result["status"] = False
    return HttpResponse(json.dumps(result), content_type="application/json")
|
5,003 | 13e2f474294edb7c78bd81456097d1389e6a0f1b | from .isearch import ISearcher
__all__ = ['ISearcher']
|
5,004 | d960d3d1680f825f0f68fc6d66f491bbbba805ce | # Kipland Melton
import psutil
import math
def convert_size(size_bytes):
    """Render a byte count as a human-readable string, e.g. 1536 -> '1.5 KB'.

    Args:
        size_bytes: non-negative number of bytes.

    Returns:
        "0B" for zero, otherwise "<value> <unit>" with the value rounded
        to two decimals and the unit chosen by powers of 1024.
    """
    if size_bytes == 0:
        return "0B"
    # Bug fix: the first unit was the typo "%"; plain bytes are "B".
    size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    i = int(math.floor(math.log(size_bytes, 1024)))
    p = math.pow(1024, i)
    s = round(size_bytes / p, 2)
    return "%s %s" % (s, size_name[i])
def RetrieveMemory():
    """Print virtual-memory figures (total/used/free/percent) via psutil."""
    # Holds returned information from Psutil library involving memory
    ram_info = psutil.virtual_memory()
    typePresented = ("Total : ","Used : ","Free : ", "Usage : ")
    # Main formatting data presentation loop
    counter = 0
    print()
    for info in ram_info:
        #print("iteration:",counter)
        try:
            # NOTE(review): values > 100 are treated as byte counts and
            # anything <= 100 as the usage percentage; this heuristic also
            # drives the label counter.  Confirm against the field order of
            # psutil.virtual_memory() on the target platform.
            if info > 100:
                print(typePresented[counter],convert_size(info))
                counter += 1
            else:
                print(typePresented[3],convert_size(info))
        except IndexError:
            # More byte-sized fields than labels: skip the extras.
            continue
if __name__ == "__main__":
    RetrieveMemory()
5,005 | b7606befe123c4fb6840a1bc62e43e6721edfcc3 | import boto3
import json
# Region and launch parameters for the EC2 instances.
region = 'us-east-2'
ec2 = boto3.resource('ec2',region)
ImageId = 'ami-07efac79022b86107'
KeyName = 'aws_keypair'
InstanceType = 't2.micro'
#IamInstanceProfile =
# Launch between 1 and 5 t2.micro instances from the AMI above,
# attaching the 'Test-ec2-pro' IAM instance profile by name.
instances = ec2.create_instances(
    ImageId =ImageId,
    MinCount = 1,
    MaxCount = 5,
    KeyName = KeyName,
    InstanceType = InstanceType,
    IamInstanceProfile = {
        'Name' : 'Test-ec2-pro',
    }
)
|
5,006 | d90a4b00d97cecf3612915a72e48a363c5dcc97b | #!/usr/bin/python3
"""Locked class module"""
class LockedClass:
    """Class whose instances may only ever have a `first_name` attribute.

    Declaring __slots__ removes the per-instance __dict__, so assigning
    any attribute other than `first_name` raises AttributeError.
    """

    __slots__ = ('first_name',)
|
5,007 | b1c8aceab44574d0f53d30969861be028c920ef2 | # Create your views here.
# -*- coding: utf-8 -*-
from json import dumps
from django.shortcuts import render_to_response
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.conf import settings
from utils import Utils, MERCS, ENTITY, PARTS, ARMY, DETAIL_INFO
RUNNING_INFO = {}
TIMER = []
class Crisis_View():
    """Django view collection for the 'crisis' game-assistant pages.

    State is process-global: RUNNING_INFO maps a user id to his current
    session/automation context, so every method reads and writes it.
    NOTE(review): module-global mutable state is not safe across multiple
    worker processes - confirm the deployment runs a single process.
    """
    def __init__(self):
        self.utils = Utils()
        self.utils.watch_dog_runner()
    def is_exist(self, uid):
        # True when the user already has a running context.
        if uid in RUNNING_INFO.keys():
            return True
        return False
    def login(self, request):
        """ Login """
        context = {}
        request.session['is_auth'] = False
        if request.method == "POST":
            if "connect" in request.POST:
                try:
                    uid = request.POST.get("uid", None)
                    auth = request.POST.get("auth", None)
                    if uid is not None:
                        uid = str(uid)
                        # uid = "vk:2342994"
                        # auth = "c0a426784e761547e57afcc6d2bbc367"
                        request.session['uid'] = uid
                        flag, data = self.utils.get_participant_info(uid, auth)
                        if flag:
                            request.session['is_auth'] = True
                            # "br*" accounts have no daily-mercenary feature.
                            if uid.startswith("br"):
                                request.session['is_daily'] = False
                            else:
                                request.session['is_daily'] = True
                            request.session["is_leader"] = data.get("is_leader", False)
                            if uid not in RUNNING_INFO.keys():
                                data.update({"is_run": False, "is_attack": False})
                            else:
                                # Merge fresh participant info into the
                                # already-running context.
                                temp = RUNNING_INFO[uid]
                                temp.update(data)
                                data = temp
                            RUNNING_INFO.update({uid: data})
                            context.update(data)
                            return HttpResponseRedirect('order')
                        else:
                            context = {"error": True, "message": data}
                except Exception, err:
                    self.utils.logger.error(err.message)
                    if "Unable to create a new session key" in err.message:
                        context = {"error": True, "message": "Internal server Error. Please, contact administrator and try later..."}
                    else:
                        context = {"error": True, "message": "User select failed. Please, try again..."}
        return render_to_response("crisis/user_select.html",
                                  context,
                                  context_instance=RequestContext(request))
    def info(self, request):
        # Debug endpoint: dump the whole in-memory state.
        return HttpResponse("= %s =" % RUNNING_INFO)
    def order(self, request):
        """ Gather resources and order units """
        is_auth = request.session.get("is_auth", False)
        if not is_auth:
            return HttpResponseRedirect('/crisis')
        uid = request.session['uid']
        context = RUNNING_INFO.get(uid, {})
        context.update({"is_auth": is_auth,
                        "is_daily": request.session.get("is_daily", False),
                        "is_leader": request.session.get("is_leader", False),
                        "entity_list": ENTITY,
                        "parts_list": PARTS,
                        "detail_info_list": DETAIL_INFO})
        if "priority" not in context:
            # Default: every unit type gets priority 1.
            priority = {}
            for item in ARMY:
                priority.update({item: 1})
            context.update({"priority": priority})
        if context.get("is_run", False):
            context.update({"left_time": self.utils.get_remaining_time(uid),
                            "order": self.utils.get_current_unit_order(uid)})
        """ Context Example
        context = {"username": self.utils.get_user_name(uid),
                   "is_run": False,
                   "is_auth": is_auth,
                   "resource": {"money": 100, "food": 200, "fuel": 300},
                   "entity": {"armor_composite": 1, "armor_plate": 2, "control_block": 3,
                              "gun_receiver": 4, "kevlar_fiber": 5, "laser_aimer": 6,
                              "powder_charge": 7, "rare_item": 8, "tnt_charge": 9},
                   "parts": {"artillery_armor": 1, "artillery_chassis": 2, "artillery_shell": 3, "detonator": 4,
                             "gunner_armor": 5, "gunner_gun": 6, "jeep_armor": 7, "jeep_gun": 8, "sniper_armor": 9,
                             "sniper_gun": 10, "soldier_gun": 11, "tank_chassis": 12, "thrower_armor": 13,
                             "thrower_gun": 14, "wave_emitter": 15},
                   'order': {'soldier': 1, 'thrower': 4, 'artillery': 8, 'gunner': 2, 'base_artillery': 7, 'jeep': 6, 'artillery_emp': 9, 'base_tank': 5, 'artillery_cassete': 0, 'sniper': 3}
                   }
        """
        if request.method == "POST":
            if "start" in request.POST:
                # Parse the requested unit counts and priorities from the
                # form; malformed values fall back to count 0 / priority 1.
                order, priority = {}, {}
                data = dict(request.POST)
                for item in ARMY:
                    try:
                        count = int(data.get(item, [''])[0])
                    except:
                        count = 0
                    try:
                        prior = int(data.get("%s_priority" % item, [''])[0])
                    except:
                        prior = 1
                    order.update({item: count})
                    priority.update({item: prior})
                context.update({"is_run": True,
                                "order": order,
                                "priority": priority,
                                "left_time": self.utils.get_remaining_time(uid)})
                RUNNING_INFO.update({uid: context})
                self.utils.start_gather(uid, context)
            elif "stop" in request.POST:
                uid = request.session['uid']
                context = RUNNING_INFO.get(uid, {})
                context.update({"is_run": False, "left_time": "00:00:00"})
                RUNNING_INFO.update({uid: context})
                self.utils.stop_gather(uid)
        return render_to_response("crisis/order.html",
                                  context,
                                  context_instance=RequestContext(request))
    def daily(self, request):
        """ Set Daily Mercenary """
        is_auth = request.session.get("is_auth", False)
        is_daily = request.session.get("is_daily", False)
        if not is_auth or not is_daily:
            return HttpResponseRedirect('/crisis')
        uid = request.session.get("uid", None)
        params = self.utils.get_daily_params(uid)
        context = {"username": self.utils.get_user_name(uid),
                   "is_auth": is_auth,
                   "is_daily": is_daily,
                   "is_leader": request.session.get("is_leader", False),
                   "mercs": ["off"] + MERCS + ["random"],
                   "daily_merc": params[0],
                   "daily_schema": params[1],
                   "event_schema": params[2],
                   "group_plugin": params[3],
                   }
        if request.method == "POST":
            if "save" in request.POST:
                # Persist each chosen parameter, then echo it back into
                # the page context.
                daily_merc = request.POST.get("daily_merc", None)
                self.utils.update_participant_params(uid, "DAILY_MERC", daily_merc)
                daily_schema = request.POST.get("daily_schema", None)
                self.utils.update_participant_params(uid, "DAILY_SCHEMA", daily_schema)
                event_schema = request.POST.get("event_schema", None)
                self.utils.update_participant_params(uid, "EVENT_SCHEMA", event_schema)
                group_plugin = request.POST.get("group_plugin", None)
                self.utils.update_participant_params(uid, "GROUP_PLUGIN", group_plugin)
                context.update({"daily_merc": daily_merc,
                                "daily_schema": daily_schema,
                                "event_schema": event_schema,
                                "group_plugin": group_plugin})
        return render_to_response("crisis/daily.html",
                                  context,
                                  context_instance=RequestContext(request))
    def trade(self, request):
        """ Buy entities at trade house """
        is_auth = request.session.get("is_auth", False)
        is_daily = request.session.get("is_daily", False)
        if not is_auth or not is_daily:
            return HttpResponseRedirect('/crisis')
        uid = request.session.get("uid", None)
        entity_order = self.utils.get_trade_order(uid)
        context = {"username": self.utils.get_user_name(uid),
                   "is_auth": is_auth,
                   "is_daily": is_daily,
                   "is_leader": request.session.get("is_leader", False),
                   "slicer_list": [":4", "4:8", "8:"],
                   "entities": ["soldier_gun", "gunner_gun"] + ENTITY,
                   "entity_order": entity_order,
                   }
        if request.method == "POST":
            if "save" in request.POST:
                entity_order = {}
                for key, value in dict(request.POST).iteritems():
                    # Form fields look like "<entity>@money" / "<entity>@gold".
                    if "@money" in key or "@gold" in key:
                        entity, kind = key.split("@")
                        temp = list(entity_order.get(entity, []))
                        if kind not in temp:
                            temp.append(str(kind))
                        entity_order.update({entity: temp})
                    elif "money_limit" in key:
                        value = value[0]
                        if value:
                            # Enforce a minimum reserve of 1000 money.
                            if int(value) < 1000:
                                money_limit = 1000
                            else:
                                money_limit = int(value)
                        else:
                            money_limit = 10000
                        entity_order.update({"money_limit": money_limit})
                context.update({"entity_order": entity_order})
                self.utils.update_participant_params(uid, "TRADE_ORDER", dumps(entity_order))
        return render_to_response("crisis/trade.html",
                                  context,
                                  context_instance=RequestContext(request))
    def city(self, request):
        """ Attack city """
        is_auth = request.session.get("is_auth", False)
        is_daily = request.session.get("is_daily", False)
        if not is_auth or not is_daily:
            return HttpResponseRedirect('/crisis')
        uid = request.session.get("uid", None)
        regions, cities = self.utils.get_region_cities()
        context = RUNNING_INFO.get(uid, {})
        context.update({"username": self.utils.get_user_name(uid),
                        "is_auth": is_auth,
                        "is_daily": is_daily,
                        "is_leader": request.session.get("is_leader", False),
                        "is_attack": context.get("is_attack", False),
                        "regions": regions,
                        "cities": cities
                        })
        if request.method == "POST":
            if "start" in request.POST:
                try:
                    context.update({"is_attack": True, "selected_city": request.POST.get("city")})
                    RUNNING_INFO.update({uid: context})
                    self.utils.start_city_attack(uid, context)
                except Exception, err:
                    self.utils.logger.error("Error during start city attack: %s" % err)
            elif "stop" in request.POST:
                try:
                    context.update({"is_attack": False})
                    self.utils.stop_city_attack(uid)
                except:
                    pass
        return render_to_response("crisis/city.html",
                                  context,
                                  context_instance=RequestContext(request))
    def statistics(self, request):
        """ Clan participants weekly statistics """
        context = {"uid": request.session.get("uid", None),
                   "is_auth": request.session.get("is_auth", False),
                   "is_daily": request.session.get("is_daily", False),
                   "is_leader": request.session.get("is_leader", False),
                   }
        context.update({"statistics": self.utils.get_statistics()})
        # "dates": self.utils.get_artefacts_dates()})
        return render_to_response("crisis/statistics.html",
                                  context,
                                  context_instance=RequestContext(request))
    def about(self, request):
        """ Information about developer """
        context = {"is_auth": request.session.get("is_auth", False),
                   "is_daily": request.session.get("is_daily", False),
                   "is_leader": request.session.get("is_leader", False),
                   "authors": settings.ADMINS}
        return render_to_response("crisis/about.html",
                                  context,
                                  context_instance=RequestContext(request))
5,008 | fb2ef5a90b6e2582450726905868dd1b78e36166 | # 2019/10/08 2019년10월8일
# Read a date as "year/month/day" and print the date ten years later.
ss = input('날짜: 년/월/일 입력-> ')
sslist = ss.split('/')
print(sslist)
print('입력하신 날짜의 10년 후 -> ', end='')
# Only the year changes; month and day are echoed verbatim.
year = int(sslist[0]) + 10
print(str(year) + "년", end='')
print(sslist[1] + "월", end='')
print(sslist[2] + "일")
|
5,009 | 14f3c941856ddf6bd7b3e046f21072f0b5f7b036 | class Solution:
def minimumDeletions(self, nums: List[int]) -> int:
n = len(nums)
a = nums.index(min(nums))
b = nums.index(max(nums))
if a > b:
a, b = b, a
return min(a + 1 + n - b, b + 1, n - a)
|
5,010 | 394f835064d070a30040b6f01b25b6f0e005827d | """
Created on Fri Jan 07 20:53:58 2022
@author: Ankit Bharti
"""
from unittest import TestCase, main
from cuboid_volume import *
class TestCuboid(TestCase):
    """Unit tests for cuboid_volume() and add() from cuboid_volume."""

    def test_volume(self):
        # Volume of a cube for a few representative edge lengths.
        for edge, expected in ((2, 8), (1, 1), (0, 0)):
            self.assertAlmostEqual(cuboid_volume(edge), expected)

    def test_input_value(self):
        # Non-numeric edge lengths must be rejected.
        self.assertRaises(TypeError, cuboid_volume, 'ank')

    def test_addition(self):
        self.assertEqual(add(3, 4), 7)
        self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)

    def test_addition_input_value(self):
        # Mixed string/number addition must be rejected.
        self.assertRaises(TypeError, add, 'ank', 6)
# Run the test suite when executed directly.
if __name__ == '__main__':
    main()
|
5,011 | 4bd6a7c7fc6a788b2cb010f6513872bd3e0d396c | import os
import random
readpath = './DBLP/'
writepath = './DBLP/'
dataname = 'dblp.txt'
labelname = 'node2label.txt'
testsetname = writepath + 'dblp_testset.txt'
def run(save_rate):
    """Split the DBLP edge list chronologically into train/test sets.

    Keeps the earliest `save_rate` fraction of edges (extended so equal
    timestamps are never split apart), renumbers the surviving users to
    a dense 0..N-1 range, writes the renamed training edges back, writes
    the remaining edges whose endpoints survived as the test set, and
    filters/renames the node-label file accordingly.
    """
    rdataname = readpath + dataname
    rlabelname = readpath + labelname
    wdataname = writepath + dataname
    wlabelname = writepath + labelname
    ordata = []
    all_user = set()
    all_time = set()
    rename = dict()
    newdatasize = 0
    # Each line: "<src> <dst> <timestamp>"; parse and sort by timestamp.
    with open(rdataname, 'r') as r:
        for line in r:
            x = line.strip('\n').split()
            x[2] = float(x[2])
            ordata.append(x)
    ordata = sorted(ordata, key = lambda x:x[2])
    datasize = len(ordata)
    savesize = int(datasize * save_rate)
    print("原始数据中共有 %d 条\n预计保留 %d 条" % (datasize, savesize))
    # Grow the cut point so edges sharing one timestamp stay together.
    while(savesize != datasize and ordata[savesize-1][2] == ordata[savesize][2]):
        savesize = savesize + 1
    print("实际保留 %d 条" % savesize)
    print("实际切割比例" + str(savesize/datasize))
    # Collect the users and timestamps present in the kept slice.
    for i in range(savesize):
        x = ordata[i]
        a = str(x[0])
        b = str(x[1])
        all_user.update({a,b})
        #print(len(all_user))
        all_time.add(x[2])
    print("实际保留数据中,用户数量 %d 个,不同时间节点 %d 个" %(len(all_user), len(all_time)))
    newdatasize = savesize
    # Dense renumbering: sort surviving user ids, map to 0..N-1.
    list_all_user = list(all_user)
    list_all_user = [int(i) for i in list_all_user]
    list_all_user.sort()
    step = 0
    for i in list_all_user:
        rename[i] = step
        #print(i, rename[i])
        step = step + 1
    flag = os.path.exists(writepath)
    if not flag:
        os.makedirs(writepath)
    # Write the renamed training edges.
    with open(wdataname, 'w') as w:
        for i in range(newdatasize):
            x = ordata[i]
            a = str(rename[int(x[0])])
            b = str(rename[int(x[1])])
            w.write(a + ' ' + b + ' ' + str(x[2])+'\n')
    # Write the test set: later edges whose endpoints both survived.
    with open(testsetname, 'w') as w:
        index = 0
        for i in range(newdatasize,datasize):
            x = ordata[i]
            if(int(x[0]) not in rename or int(x[1]) not in rename):
                continue
            a = str(rename[int(x[0])])
            b = str(rename[int(x[1])])
            w.write(a + ' ' + b + ' ' + str(x[2])+'\n')
            index = index+1
    print('预计测试集剩余数量 %d'%(datasize-newdatasize+1))
    print('测试集剩余数量 %d'%(index))
    # Filter and rename the node-label file to the surviving users.
    temp = 0
    with open(rlabelname, 'r') as r:
        with open(wlabelname, 'w') as w:
            for line in r:
                x = line.strip('\n').split()
                if(x[0] in all_user):
                    temp = temp + 1
                    a = str(rename[int(x[0])])
                    w.write(a + ' ' + x[1] + '\n')
    print("标签集数量 " + str(temp)+ " 个")
if __name__ == '__main__':
    # Keep the earliest 70% of edges for training.
    run(0.7)
|
5,012 | 26ef7de89e2e38c419310cc66a33d5dc0575fc0d | # Generates an infinite series of odd numbers
def odds():
    """Generate the infinite sequence of odd numbers 1, 3, 5, ..."""
    n = -1
    while True:
        n += 2
        yield n
def pi_series():
    """Yield successive partial sums of the Leibniz series for pi.

    pi = 4/1 - 4/3 + 4/5 - 4/7 + ...; every partial sum is yielded,
    so consecutive values bracket pi ever more tightly.
    """
    denominators = odds()
    approximation = 0
    sign = 1
    while True:
        approximation += sign * (4 / next(denominators))
        sign = -sign
        yield approximation
# Print the first 10000 partial sums of the series.
approx_pi = pi_series()
# The higher the range used here the closer to an accurate approximation of PI.
for x in range(10000):
    print(next(approx_pi))
|
5,013 | d386047c087155b1809d47349339eb6882cf8e26 | import stock as stk
import portfolio as portf
import plot
import sys
import cmd
import os
import decision as des
class CLI(cmd.Cmd):
    """Interactive stock-analysis shell built on cmd.Cmd.

    do_* methods are commands and help_* methods their usage text; the
    cmd framework wires them together by name.
    """
    def __init__(self):
        cmd.Cmd.__init__(self)
        self.prompt = '$> '
        self.stk_data_coll = stk.StockDataCollection()
        self.add_to_plot_lst = []
        # Number of parallel downloads used by load_collection.
        self.paralel_count = 10
    def do_set_paralel_count(self, arg):
        # NOTE(review): `arg` arrives as a string from the shell; confirm
        # downstream code tolerates a non-int value.
        self.paralel_count = arg
    def do_get_paralel_count(self, arg):
        print self.paralel_count
    def help_set_paralel_count(self):
        print "syntax: set_paralel_count [NUMBER]",
        print "-- update self.paralel_count for load command"
    def do_load_collection(self, arg):
        # Download/refresh data for all tickers in the portfolio file.
        self.stk_data_coll.load(conf_file=arg, paralel_count=self.paralel_count)
        print "---------------------------------------"
        print "Data downloaded for ", arg
    def help_load_collection(self):
        print "syntax: load [portfolio file]",
        print "-- load/updates the tickers from portfolio file"
    def do_set_collection(self, arg):
        self.stk_data_coll.set_colection(arg)
    def help_set_collection(self):
        print "syntax: set_collection [portfolio file]",
        print "-- set the tickers from portfolio file"
    def do_get_collection(self, arg):
        print "-----------------------------"
        print " Collection from ", self.stk_data_coll.conf_file
        print "-----------------------------"
        for c in self.stk_data_coll.stk_data_coll:
            print c
    def do_cleanup(self, arg):
        # Delete every cached .dat file under ./data.
        filelist = [ f for f in os.listdir("./data") if f.endswith(".dat") ]
        for f in filelist:
            os.remove("./data/" + f)
    def help_cleanup(self):
        print "syntax: cleanup",
        print "-- removes all data files"
    def do_plot_indexes(self, arg):
        # First index is the base plot; the rest are overlaid on it.
        indexes = arg.split(',',1)
        a_plot = plot.Plot(plot.PlotCellIndex(indexes[0]))
        try:
            for index in indexes[1:]:
                p = plot.PlotCellIndex(index)
                a_plot.addSimple(plot.PlotCell((p.data,p.dates)))
        finally: a_plot.plot()
    def help_plot_indexes(self):
        print "syntax: plot_index [index_name1,index_name2,....]",
        print "-- plot slimple index from csv"
    def do_plot_ticker_indexes(self,arg):
        # Plot a ticker with 200/50-day SMAs plus extra csv indexes.
        calc = stk.StockCalcIndex(self.stk_data_coll)
        sd = stk.StockData()
        ticker, indexes, startdate = arg.split()
        indexes = indexes.split(',',1)
        sd.load(ticker, startdate)
        a_plot = plot.Plot(plot.PlotCell((sd.Cs,sd.dates)))
        a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 200),overlay=True))
        a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 50),overlay=True))
        for index in indexes:
            p = plot.PlotCellIndex(index)
            p.truncate(startdate)
            a_plot.addSimple(plot.PlotCell((p.data,p.dates)))
            a_plot.addSimple(plot.PlotCell(calc.sma((p.data,p.dates),20)))
            a_plot.addSimple(plot.PlotCell(calc.sma((p.data,p.dates),50),overlay=True))
        a_plot.plot()
    def do_plot_collection(self, arg):
        # Plot a ticker with SMA/LLV/volume/OBV/correlation panels.
        calc = stk.StockCalcIndex(self.stk_data_coll)
        sd = stk.StockData()
        ticker, startdate = arg.split()
        sd.load(ticker, startdate)
        a_plot = plot.Plot(plot.PlotCell((sd.Cs,sd.dates)))
        a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 200),overlay=True))
        a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 50),overlay=True))
        a_plot.addSimple(plot.PlotCell( calc.llv((sd.Cs, sd.dates), 100),overlay=True))
        a_plot.addSimple(plot.PlotCell( calc.sma((sd.Vs,sd.dates),20 )))
        a_plot.addSimple(plot.PlotCell( calc.obv((sd.Cs,sd.Vs,sd.dates) )))
        a_plot.addSimple(plot.PlotCell( calc.correlation_adj((sd.Cs,sd.dates))))
        a_plot.plot()
    def do_plot(self, arg):
        # Plot a ticker with SMAs, LLV, volume SMA and smoothed OBV.
        calc = stk.StockCalcIndex(self.stk_data_coll)
        sd = stk.StockData()
        ticker, startdate = arg.split()
        sd.load(ticker, startdate)
        a_plot = plot.Plot(plot.PlotCell((sd.Cs,sd.dates)))
        a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 200),overlay=True))
        a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 50),overlay=True))
        a_plot.addSimple(plot.PlotCell( calc.llv((sd.Cs, sd.dates), 100),overlay=True))
        a_plot.addSimple(plot.PlotCell( calc.sma((sd.Vs,sd.dates),20 )))
        arr_obv = calc.obv((sd.Cs,sd.Vs,sd.dates) )
        a_plot.addSimple(plot.PlotCell( arr_obv))
        a_plot.addSimple(plot.PlotCell( calc.sma(arr_obv, 20),overlay=True))
        a_plot.addSimple(plot.PlotCell( calc.sma(arr_obv, 60),overlay=True))
        a_plot.plot()
    def help_plot(self):
        print "syntax: plot [ticker] []|[start date YYYYMMDD]",
        print "-- plots the ticker"
    def do_simulation(self, arg):
        # Compare two trading strategies on one ticker and plot trades.
        ticker, startdate = arg.split()
        calc = stk.StockCalcIndex(self.stk_data_coll)
        sd = stk.StockData()
        sd.load(ticker, startdate)
        port = des.DecisionCollection(ticker, 50000)
        decision = des.DecisionSimpleSMA(ticker, (sd.Cs, sd.dates), port)
        decision.looper()
        print ticker, ":", str(port)
        port2 = des.DecisionCollection(ticker, 50000)
        decision2 = des.DecisionSimpleStopSMA(ticker, (sd.Cs, sd.dates), port2, risk_factor=0.01, )
        decision2.looper()
        print ticker, ":", str(port2)
        port2.print_all()
        a_plot = plot.Plot(plot.PlotCell((sd.Cs,sd.dates)))
        a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 200),overlay=True))
        a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 50),overlay=True))
        a_plot.addSimple(plot.PlotCell( calc.llv((sd.Cs, sd.dates), 100),overlay=True))
        a_plot.addSimple(plot.PlotCell( port2.get_enter_plot_cell(), overlay=True, color='go' ))
        a_plot.addSimple(plot.PlotCell( port2.get_leave_plot_cell(), overlay=True, color='ro' ))
        a_plot.addSimple(plot.PlotCell( port2.get_value_plot_cell()))
        a_plot.plot()
    def help_simulation(self):
        print "syntax: simulation [ticker] []|[start date YYYYMMDD]",
        print "-- runs a simulation on a single ticker"
    def do_simulation_collection(self, arg ):
        # Run four strategy variants over every ticker in the collection.
        for ticker in self.stk_data_coll.stk_data_coll:
            sd = stk.StockData()
            sd.load(ticker, arg)
            port = des.DecisionCollection(ticker, 50000)
            decision = des.DecisionSimpleStopSMA(ticker, (sd.Cs, sd.dates), port, risk_factor=0.02, sma_fast=10, sma_slow=50, stop_per=5)
            decision.looper()
            port4 = des.DecisionCollection(ticker, 50000)
            decision4 = des.DecisionSimpleSMA(ticker, (sd.Cs, sd.dates), port4, sma_fast=10, sma_slow=50, stop_per=5)
            decision4.looper()
            port2 = des.DecisionCollection(ticker, 50000)
            decision2 = des.DecisionSimpleSMA(ticker, (sd.Cs, sd.dates), port2)
            decision2.looper()
            port3 = des.DecisionCollection(ticker, 50000)
            decision3 = des.DecisionSimpleStopSMA(ticker, (sd.Cs, sd.dates), port3, risk_factor=0.02, sma_fast=50, sma_slow=200, stop_per=40)
            decision3.looper()
            print "STOP_FAST - ", ticker, " ", str(port)
            print "SIMPLE_FAST - ", ticker, " ", str(port4)
            print "STOP_SLOW - ", ticker, " ", str(port3)
            print "SIMPLE_SLOW - ", ticker, " ", str(port2)
    def emptyline(self):
        # Ignore empty input instead of repeating the last command.
        pass
    def do_quit(self, arg):
        sys.exit(1)
# Start the interactive shell when run directly.
if __name__ == "__main__":
    cli = CLI()
    cli.cmdloop()
|
5,014 | 706f8d83bc9b4fab6f6d365c047c33913daece61 | """This module runs cdb on a process and !exploitable on any exceptions.
"""
import ctypes
import logging
import os
from pprint import pformat
from subprocess import Popen
from threading import Timer
import time
from certfuzz.debuggers.debugger_base import Debugger as DebuggerBase
from certfuzz.debuggers.output_parsers.msec_file import MsecFile
import sys
if sys.platform.startswith('win'):
import wmi
logger = logging.getLogger(__name__)
def factory(options):
    """Debugger factory entry point used by the framework."""
    # NOTE(review): `options` is passed positionally as the first
    # constructor argument (`program`); confirm callers pack their
    # arguments accordingly.
    return MsecDebugger(options)
class MsecDebugger(DebuggerBase):
    """Windows debugger wrapper: runs a target under cdb with msec.dll
    loaded and parses the resulting !exploitable output.

    Enforces a hard timeout and, optionally, kills the cdb session when
    the target's CPU usage drops to zero after having started (via WMI
    performance counters).
    """
    _platform = 'Windows'
    _key = 'msec'
    _ext = 'msec'
    def __init__(self, program, cmd_args, outfile_base, timeout, watchcpu, exception_depth=0, cdb_command='!exploitable -v', debug_heap=False, ** options):
        DebuggerBase.__init__(
            self, program, cmd_args, outfile_base, timeout, **options)
        self.exception_depth = exception_depth
        self.watchcpu = watchcpu
        if watchcpu:
            # WMI is only needed for the CPU-inactivity watchdog.
            self.wmiInterface = wmi.WMI()
        self.t = None
        self.savedpid = None
        self.cdb_command = cdb_command
        self.debugheap = debug_heap
    def kill(self, pid, returncode):
        """kill function for Win32"""
        kernel32 = ctypes.windll.kernel32
        handle = kernel32.OpenProcess(1, 1, pid)
        ret = kernel32.TerminateProcess(handle, returncode)
        kernel32.CloseHandle(handle)
        return (0 != ret)
    def debugger_app(self):
        '''
        Returns the name of the debugger application to use in this class
        '''
        # Prefer the standard install location, fall back to PATH lookup.
        typical = "C:\\Program Files\\Debugging Tools for Windows (x86)\\cdb.exe"
        if os.path.exists(typical):
            return typical
        return 'cdb'
    def debugger_test(self):
        '''
        Returns a command line (as list) that can be run via subprocess.call
        to confirm whether the debugger is on the path.
        '''
        return [self.debugger_app(), '-version']
    def _get_cmdline(self, outfile):
        # Build the full cdb invocation; the -c command runs, logs the
        # analysis, and quits.
        cdb_command = '$$Found_with_CERT_BFF_2.8;r;%s;q' % self.cdb_command
        args = []
        args.append(self.debugger_app())
        args.append('-amsec.dll')
        if hasattr(self, 'debugheap') and self.debugheap:
            # do not use hd, xd options if debugheap is set
            pass
        else:
            args.extend(('-hd', '-xd', 'gp'))
        args.extend(('-logo', outfile))
        args.extend(('-xd', 'bpe', '-xd', 'wob', '-o', '-G', '-c'))
        # Prepend one 'g;' (go) per handled exception so cdb skips past
        # them to the exception of interest.
        # NOTE(review): the loop variable deliberately(?) reuses
        # self.exception_depth - after the loop it holds depth-1.
        for self.exception_depth in xrange(0, self.exception_depth):
            cdb_command = 'g;' + cdb_command
        args.append(cdb_command)
        args.append(self.program)
        args.extend(self.cmd_args)
        for l in pformat(args).splitlines():
            logger.debug('dbg_args: %s', l)
        return args
    def _find_debug_target(self, exename, trycount=5):
        # Poll WMI for the child process's PID (watchcpu mode only).
        pid = None
        attempts = 0
        foundpid = False
        if self.watchcpu:
            while attempts < trycount and not foundpid:
                for process in self.wmiInterface.Win32_Process(name=exename):
                    # TODO: What if there's more than one?
                    pid = process.ProcessID
                    logger.debug('Found %s PID: %s', exename, pid)
                    foundpid = True
                attempts += 1
                if not foundpid and attempts < trycount:
                    logger.debug('%s not seen yet. Retrying...', exename)
                    time.sleep(0.1)
            if not pid:
                logger.debug('Cannot find %s child process!', exename)
        return pid
    def run_with_timer(self):
        # TODO: replace this with subp.run_with_timer()
        exename = os.path.basename(self.program)
        process_info = {}
        child_pid = None
        done = False
        started = False
        args = self._get_cmdline(self.outfile)
        p = Popen(args, stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'),
                  universal_newlines=True)
        self.savedpid = p.pid
        child_pid = self._find_debug_target(exename, trycount=5)
        if child_pid is None and self.watchcpu:
            logger.debug('Bailing on debugger iteration')
            self.kill(self.savedpid, 99)
            return
        # create a timer that calls kill() when it expires
        self.t = Timer(self.timeout, self.kill, args=[self.savedpid, 99])
        self.t.start()
        if self.watchcpu:
            # This is a race. In some cases, a GUI app could be done before we can even measure it
            # TODO: Do something about it
            while p.poll() is None and not done and child_pid:
                for proc in self.wmiInterface.Win32_PerfRawData_PerfProc_Process(IDProcess=child_pid):
                    # Raw perf counters: CPU % = delta(processor time) /
                    # delta(timestamp) between two samples.
                    n1, d1 = long(proc.PercentProcessorTime), long(
                        proc.Timestamp_Sys100NS)
                    n0, d0 = process_info.get(child_pid, (0, 0))
                    try:
                        percent_processor_time = (
                            float(n1 - n0) / float(d1 - d0)) * 100.0
                    except ZeroDivisionError:
                        percent_processor_time = 0.0
                    process_info[child_pid] = (n1, d1)
                    logger.debug(
                        'Process %s CPU usage: %s', child_pid, percent_processor_time)
                    if percent_processor_time < 0.0000000001:
                        if started:
                            logger.debug(
                                'killing cdb session for %s due to CPU inactivity', child_pid)
                            done = True
                            self.kill(self.savedpid, 99)
                    else:
                        # Detected CPU usage. Now look for it to drop near zero
                        started = True
                if not done:
                    time.sleep(0.2)
        else:
            p.wait()
        self.t.cancel()
    def go(self):
        """run cdb and process output"""
        # For exceptions beyond the first one, put the handled exception number
        # in the name
        if self.exception_depth > 0:
            self.outfile = os.path.splitext(self.outfile)[
                0] + '.e' + str(self.exception_depth) + os.path.splitext(self.outfile)[1]
        self.run_with_timer()
        if not os.path.exists(self.outfile):
            # touch it if it doesn't exist
            open(self.outfile, 'w').close()
        parsed = MsecFile(self.outfile)
        for l in pformat(parsed.__dict__).splitlines():
            logger.debug('parsed: %s', l)
        return parsed
    def __exit__(self, etype, value, traceback):
        # Make sure the kill timer cannot fire after we are done.
        if self.t:
            logger.debug('Canceling timer...')
            self.t.cancel()
# END MsecDebugger
|
5,015 | 15e1ce95398ff155fe594c3b39936d82d71ab9e2 | import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
from flask import Flask, jsonify, render_template, redirect
from flask_pymongo import PyMongo
from config import mongo_password, mongo_username, sql_username, sql_password
from bson.json_util import dumps
# Database Setup
# Postgres connection for the reflected "pokemon" stats table.
rds_connection_string = f"{sql_username}:{sql_password}@localhost:5432/Pokemon"
engine = create_engine(f'postgresql://{rds_connection_string}')
# Reflect existing database
Base = automap_base()
Base.prepare(engine, reflect=True)
# Save reference to the table
pokemon_sql = Base.classes.pokemon
# Flask Setup
app = Flask(__name__)
#Set up MongoDB Database
# NOTE(review): the Mongo username is hard-coded in the URI while the
# password comes from config — consider moving the whole URI to config.
app.config['MONGO_URI'] = f'mongodb+srv://MikeAnderson89:{mongo_password}@cluster0-wadjd.mongodb.net/test?retryWrites=true&w=majority'
mongo = PyMongo(app)
@app.route("/")
def index():
#Return the homepage
pokemon_data = mongo.db.pokemon.find_one()
return render_template("index.html", pokemon_data = pokemon_data)
#All Pokemon Stats
@app.route("/stats")
def stats():
    """Return every row of the pokemon SQL table as a JSON list of dicts.

    BUG FIX: the original placed ``session.close()`` after ``return``, so
    it never executed and the session leaked; the ``finally`` block now
    guarantees cleanup even if the query raises.
    """
    session = Session(engine)
    try:
        rows = session.query(pokemon_sql).all()
        pokemon_list = [{
            'Name': row.name,
            'Number': row.number,
            'Type_1': row.type_1,
            'Type_2': row.type_2,
            'HP': row.hp,
            'Attack': row.attack,
            'Defense': row.defense,
            'Special_Attack': row.sp_atk,
            'Special_Defense': row.sp_def,
            'Speed': row.speed,
            'Generation': row.generation,
            'Legendary': row.legendary,
        } for row in rows]
        return jsonify(pokemon_list)
    finally:
        session.close()
#Mongo DB image database
@app.route("/images")
def images():
    """Return all pokemon image documents as JSON, stripped of Mongo ids."""
    docs = []
    for doc in mongo.db.pokemon.find():
        del doc['_id']
        docs.append(doc)
    return jsonify(docs)
if __name__ == "__main__":
app.run(debug=True)
|
5,016 | ba09dbe3fbca51ece8a7d482324a2dec32e7dc8a | import librosa
import librosa.display
import matplotlib.pyplot as plt
import os
import numpy as np
import time
import multiprocessing as mp
from tempfile import TemporaryFile
class DataSet():
    """Loads (mel, stft) training pairs from a folder of audio files.

    Files are read in parallel worker processes; each worker pushes its
    examples onto a shared queue which the parent drains before joining.
    """

    def __init__(self, training_folder):
        # Folder containing the training audio; readFiles concatenates
        # folder + filename directly, so a trailing separator is expected.
        self.training_folder = training_folder
        print("load Data")

    def loadMelAndStft(self, filename):
        """Return a list of [mel_context, stft_frame] training examples.

        Each example pairs one STFT frame with the flattened mel frames of
        its neighbourhood (input_overlap_per_side frames on each side).
        """
        wav, sr = librosa.load(filename)
        stft_in = librosa.stft(wav)
        mel_in = librosa.feature.melspectrogram(S=stft_in)
        # Shape both spectrograms as (frames, bins).
        mel_in = np.swapaxes(np.array(mel_in), 0, 1)
        stft_in = np.swapaxes(np.array(stft_in), 0, 1)
        # Convert once, outside the loop — the original re-converted the
        # entire STFT array on every qualifying frame.
        stft_in = np.asarray(stft_in, dtype=np.float32)
        mel_and_stft = []
        input_overlap_per_side = 1
        # Frames need a full neighbourhood on both sides.  Starting at
        # `input_overlap_per_side` (not one past it) keeps the first valid
        # frame, which the original '>' comparison skipped.
        for element in range(input_overlap_per_side,
                             mel_in.shape[0] - input_overlap_per_side):
            window = mel_in[element - input_overlap_per_side:
                            element + input_overlap_per_side + 1]
            mel_in_with_overlap = np.asarray(window, dtype=np.float32).flatten()
            mel_and_stft.append([mel_in_with_overlap, stft_in[element]])
        return mel_and_stft

    def readFiles(self, queue, file_list, start, end):
        """Worker: load file_list[start:end] and put all examples on queue."""
        print("start-read-file")
        print("start ", start)
        print("end ", end)
        print("file_list ", str(len(file_list)))
        load = []
        for filename in file_list[start:end]:
            load += self.loadMelAndStft(self.training_folder + filename)
            print("Path: " + filename)
        queue.put(load)
        print("finished")

    def main(self):
        """Spawn one worker per batch of files and gather every example."""
        queue = mp.Queue()
        file_list = os.listdir(self.training_folder)
        time_before = time.time()
        processes = []
        file_batch_size = 50
        steps = int(len(file_list) / file_batch_size) + 1
        print(steps)
        for file_batch in range(steps):
            start_read = file_batch * file_batch_size
            # Skip the trailing empty batch produced when the file count is
            # an exact multiple of the batch size.
            if start_read >= len(file_list):
                break
            print("run", file_batch)
            end_read = min(start_read + file_batch_size, len(file_list))
            process = mp.Process(target=self.readFiles,
                                 args=(queue, file_list, start_read, end_read))
            processes.append(process)
        for process in processes:
            print("start process")
            process.start()
        returns = []
        for process in processes:
            # queue.get() blocks until a worker delivers its batch; results
            # must be drained before join() or large payloads can deadlock.
            returns += queue.get()
        for process in processes:
            # The original joined each process twice; once is enough.
            process.join()
        print(len(returns))
        print("time difference: ", str(time.time() - time_before))
        return returns
|
5,017 | 52dc8a4f9165a88dddc1da16e0adb045c4d851ed | import json
from typing import TYPE_CHECKING
import pytest
from eth_utils import is_checksum_address
from rotkehlchen.globaldb.handler import GlobalDBHandler
from rotkehlchen.types import ChainID
if TYPE_CHECKING:
from rotkehlchen.chain.ethereum.node_inquirer import EthereumInquirer
def test_evm_contracts_data(globaldb):
    """Test that all evm contract entries in the packaged global DB have legal data"""
    valid_chain_ids = {chain.serialize_for_db() for chain in ChainID}
    with globaldb.conn.read_ctx() as cursor:
        cursor.execute('SELECT address, chain_id, abi, deployed_block FROM contract_data')
        for address, chain_id, abi, deployed_block in cursor:
            assert is_checksum_address(address)
            assert isinstance(chain_id, int) and chain_id in valid_chain_ids
            assert isinstance(abi, int)
            assert isinstance(deployed_block, int) and deployed_block > 0
def test_evm_abi_data(globaldb):
    """Test that the evm abi entries in the packaged globalDB have legal data"""
    seen_abis = {0}
    with globaldb.conn.read_ctx() as cursor:
        cursor.execute('SELECT id, value FROM contract_abi')
        for abi_id, abi_value in cursor:
            assert isinstance(abi_id, int)
            assert isinstance(abi_value, str)
            # The stored abi must be the most compact json serialization
            # possible and must be unique among all stored abis.
            compact = json.dumps(json.loads(abi_value), separators=(',', ':'))
            assert compact == abi_value
            assert abi_value not in seen_abis
            seen_abis.add(abi_value)
@pytest.mark.parametrize('sql_vm_instructions_cb', [2])
def test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):
    """
    Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.
    """
    # NOTE(review): DELETE statements are issued inside read_ctx(); this
    # presumably works because the handler's read context still allows
    # writes — confirm against GlobalDBHandler's semantics.
    with GlobalDBHandler().conn.read_ctx() as cursor:
        # Delete one contract and its abi
        cursor.execute(
            'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN '
            'contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1',
        )
        (address, abi) = cursor.fetchone()  # There has to be at least one entry
        cursor.execute('DELETE FROM contract_data WHERE address=? AND chain_id=1', (address,))
        cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))
    # Now query the contract, let it get to packaged global DB and also see that
    # database packaged_db is locked is also not raised
    ethereum_inquirer.contracts.contract(address)
    with GlobalDBHandler().conn.read_ctx() as cursor:
        # Check that the contract and the abi were copied to the global db
        cursor.execute(
            'SELECT COUNT(*) FROM contract_data INNER JOIN '
            'contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND '
            'contract_data.address=? AND contract_abi.value=?',
            (address, abi),
        )
        assert cursor.fetchone()[0] == 1
|
5,018 | fe45fc6cd16be37b320844c5a8b43a964c016dd1 | # -*- coding: utf-8 -*-
from Clases import Lugar
from Clases import Evento
import Dialogos
import Funciones
puntuacion_necesaria = 10
hp_inicial = 5
eventos = [
Evento("dormir", 2, False, -3, 4, Dialogos.descripciones_eventos[0], Dialogos.descripciones_triunfos[0],
Dialogos.descripciones_castigos[0]),
Evento("cazar", 1, False, -2, 3, Dialogos.descripciones_eventos[1], Dialogos.descripciones_triunfos[1],
Dialogos.descripciones_castigos[1]),
Evento("comer", 2, False, 1, 1, Dialogos.descripciones_eventos[2], Dialogos.descripciones_triunfos[2],
Dialogos.descripciones_castigos[2]),
Evento("hablar", 0, True, -3, 2, Dialogos.descripciones_eventos[3], Dialogos.descripciones_triunfos[3],
Dialogos.descripciones_castigos[3]),
Evento("escalar", 0, True, -4, 3, Dialogos.descripciones_eventos[4], Dialogos.descripciones_triunfos[4],
Dialogos.descripciones_castigos[4]),
Evento("rodear", 0, False, -1, 3, Dialogos.descripciones_eventos[5], Dialogos.descripciones_triunfos[5],
Dialogos.descripciones_castigos[5]),
Evento("pescar", 2, False, -4, 2, Dialogos.descripciones_eventos[6], Dialogos.descripciones_triunfos[6],
Dialogos.descripciones_castigos[6]),
Evento("contar chiste", 0, True, 0, 6, Dialogos.descripciones_eventos[7], Dialogos.descripciones_triunfos[7],
Dialogos.descripciones_castigos[7]),
Evento("comprar", 3, False, 0, 6, Dialogos.descripciones_eventos[8], Dialogos.descripciones_triunfos[8],
Dialogos.descripciones_castigos[8])
]
dormir = eventos[0]
cazar = eventos[1]
comer = eventos[2]
hablar = eventos[3]
escalar = eventos[4]
rodear = eventos[5]
pescar = eventos[6]
contar_chiste = eventos[7]
comprar = eventos[8]
lugares = [
Lugar(1, 20, Dialogos.descripciones_lugares[0], dormir, cazar),
Lugar(21, 40, Dialogos.descripciones_lugares[1], comer, hablar),
Lugar(41, 75, Dialogos.descripciones_lugares[2], escalar, rodear),
Lugar(76, 90, Dialogos.descripciones_lugares[3], dormir, pescar),
Lugar(91, 100, Dialogos.descripciones_lugares[4], contar_chiste, comprar)
]
bosque = lugares[0]
ciudad = lugares[1]
montana = lugares[2]
lago = lugares[3]
viajero = lugares[4]
# --- Greeting and age gate -------------------------------------------------
# BUG FIX: the original tested `quiere_jugar == "si" or "yes" or "y" or "s"`,
# which is always true because non-empty string literals are truthy; a
# membership test over the accepted answers fixes that.  The 13-17 branch
# now accepts the same set of answers as the adult branch for consistency.
RESPUESTAS_SI = ("si", "yes", "y", "s")

print(Dialogos.saludo[0])
nombre = input(Dialogos.saludo[1])
edad = int(input(Dialogos.saludo[2]))
print("\nHola", nombre, "tienes,", edad, "años.")
if edad >= 18:
    print("¡Tienes edad suficiente para jugar!")
    quiere_jugar = input("¿Quieres jugar? ").lower()
    if quiere_jugar in RESPUESTAS_SI:
        puede_jugar = True
        print("\n¡Comienza la aventura! (HP = 5)\n")
    else:
        puede_jugar = False
        print("Adiós...")
elif edad >= 13:
    print("¡Puedes jugar bajo supervisión!")
    quiere_jugar = input("¿Quieres jugar? ").lower()
    if quiere_jugar in RESPUESTAS_SI:
        puede_jugar = True
        print("\n¡Comienza la aventura!\n")
    else:
        puede_jugar = False
        print("Adiós...")
else:
    puede_jugar = False
    print("¡Eres muy joven para jugar!")
    print("Adiós...")
# --- Main game loop --------------------------------------------------------
while puede_jugar:
    puntuacion = 0
    hp = hp_inicial
    derrota = False
    while puntuacion < puntuacion_necesaria and not derrota:
        dado = Funciones.roll_dice(100)
        # Map the d100 roll onto a location.
        if dado <= 20:
            lugar_actual = bosque
        elif dado <= 35:
            lugar_actual = ciudad
        elif dado <= 65:
            lugar_actual = montana
        elif dado <= 95:
            lugar_actual = lago
        else:
            lugar_actual = viajero
        print(lugar_actual.descripcion)
        print("a)", lugar_actual.evento_1.nombre, "b)", lugar_actual.evento_2.nombre)
        decision = ""
        while decision not in ("a", "b"):
            decision = input()
            if decision not in ("a", "b"):
                print("Esa opción no existe.")
        if decision == "a":
            evento_actual = lugar_actual.evento_1
        else:
            evento_actual = lugar_actual.evento_2
        print(evento_actual.descripcion)
        (hp, puntuacion, derrota) = Funciones.interactuar(evento_actual, puntuacion, hp, derrota)
        print("\n")
    Funciones.comprobar_victoria(derrota, puntuacion)
    quiere_jugar = input("\n¿Reintentar? ").lower()
    print("\n")
    # BUG FIX: the original `quiere_jugar != "si" and "yes" and "y" and "s"`
    # only compared against "si"; use a membership test over all accepted
    # affirmative answers.
    if quiere_jugar not in ("si", "yes", "y", "s"):
        puede_jugar = False
5,019 | 4620b52a43f2469ff0350d8ef6548de3a7fe1b55 | # -*- snakemake -*-
#
# CENTIPEDE: Transcription factor footprinting and binding site prediction
# install.packages("CENTIPEDE", repos="http://R-Forge.R-project.org")
#
# http://centipede.uchicago.edu/
#
# Pull in the shared NGS settings before layering CENTIPEDE defaults.
include: '../ngs.settings.smk'
# Default (empty) command-line options for the CENTIPEDE motif tool;
# user-supplied config takes precedence via update_config below.
config_default = {
    'bio.ngs.motif.centipede' : {
        'options' : '',
    },
}
# update_config merges the user config over the defaults (snakemake utility).
update_config(config_default, config)
config = config_default
|
5,020 | cdc9bc97332a3914415b16f00bc098acc7a02863 | L = "chaine de caractere"
print("parcours par élément")
for e in L :
print("caractere : *"+e+"*")
|
5,021 | 69511933697905fb4f365c895264596f19dc1d8d | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 26 18:39:26 2020
@author: Fanny Fredriksson and Karen Marie Sandø Ambrosen
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm #count ffor loops
import math
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn import preprocessing
from sklearn.utils import shuffle
from sklearn.linear_model import Lasso
from utils_runOnce_classification import getEgillX, getEgillParameters
from utils_runOnce_classification import significant_connected_areasBAitaSigX, getBAitaSigParameters, getBAitaParameters
import seaborn as sns
from utils_joint import getNewestFolderDate, get_Xy
import pdb
#{}
#[]
##############################################################################
def leaveKout_CV(X, y, n_scz_te, rep, perms, classifiers, parameters, count,
                 freq_bands, x_size, auc, nz_coef_idx, nz_coef_val, n_BAitaSig = None):
    """
    Calculates the leave K out cross validation.
    Parameters
    ----------
    X : array of arrays
        Matrix containing a vector with all the features for each subject.
        Dimension (number of subjects)x(number of features).
    y : array
        A vector containing the class-information.
        Remember: 1 = healty controls, 0 = schizophrenic
    n_scz_te : int
        Desired number of schizophrenic patients in each test set.
    rep : integer
        The number of repition that has been used so far.
    perms : range(*)
        Range with desired number (*) of permutations.
        *=1 indicates no permutations.
    classifiers : dictionary
        Dictionary containing classifiers. E.g. {'lasso' : Lasso(max_iter = 10000)}
    parameters : dictionary
        Dictionary containing parameters to the classifiers as in "classifiers"
    count : integer
        Used to know how many loops that have been made due to the pre
        allocated space for AUC.
    freq_bands : list of strings
        Either ['all'] or ['detla','theta','alpha','beta1','beta2','gamma'].
    x_size : integer
        The size each X has which changes depending on freq_bands.
    auc : dictionary
        Contains the auc-scores for each loop, either divided into bands or
        with the key "all".
    nz_coef_idx : dictionary
        Contains the non-zero coefficient indices for each loop, either
        divided into bands or with the key "all".
    nz_coef_val : dictionary
        Contains the non-zero coefficient values (the weights) for each
        loop, either divided into bands or with the key "all".
    n_BAitaSig : list of integers, optional
        The number of connections in each band when BAitaSig is used.
        The default is None.
    Returns
    -------
    auc : dictionary
        Contains the updated auc-scores for each loop, either divided into
        bands or with the key "all".
    nz_coef_idx : dictionary
        Contains the updated non-zero coefficient indices for each loop,
        either divided into bands or with the key "all".
    nz_coef_val : dictionary
        Contains the updated non-zero coefficient values (the weights) for
        each loop, either divided into bands or with the key "all".
    count : integer
        Used to know how many loops that have been made due to the pre
        allocated space for AUC.
    """
    # NOTE(review): this version is instrumented to produce a 2x3 example
    # figure of the regularization line search and then DELIBERATELY aborts
    # via NameError after 6 folds (see count_plt check below).  It is not a
    # clean CV run.
    # Outer leave-K-out split: K folds chosen so each test fold holds
    # n_scz_te schizophrenic subjects (class 0).
    skf = StratifiedKFold(n_splits=int(sum(y==0)//n_scz_te),shuffle=True, random_state = rep)
    count_plt = 0
    fig, ax = plt.subplots(2,3 , figsize=(10,6.5))
    for tr_idx, te_idx in skf.split(X,y):
        # Compute test and train targets
        y_tr = np.ravel(y[tr_idx])
        y_te = np.ravel(y[te_idx])
        # Make gridsearch function
        clf_name = list(classifiers.keys())[0]
        count += 1
        sns.set(font_scale=1.5)
        for i in range(1): #range(len(freq_bands)):
            if count_plt == 6:
                # Six subplots collected: finalize the example figure, save
                # it, and stop the whole run on purpose.
                plt.suptitle('Example of line search for the regularization parameter', fontsize= 18)
                plt.tight_layout()
                plt.subplots_adjust(top = 0.84, bottom = 0.15, hspace = 0.5, wspace = 0.45)
                fig.legend(['Train', 'Validation'], bbox_to_anchor = (0.5, 0.89),
                           borderaxespad = 0., loc = 'upper center', ncol = 2)
                plt.show()
                fig.savefig('/share/FannyMaster/PythonNew/Figures/LineSearchEx.jpg', bbox_inches = 'tight')
                sns.reset_orig()
                raise NameError('This is just a dumb way of stopping the code after 6 iterations')
            # Hard override of the loop variable: only band index 1 (theta
            # when bands are separated) is evaluated in this instrumented run.
            i = 1
            # Inner CV over the regularization grid, AUC-scored.
            clf = GridSearchCV(classifiers[clf_name], {'alpha' :parameters[freq_bands[i]]},
                               cv = StratifiedKFold(n_splits = int(sum(y_tr==0)//n_scz_te)),
                               scoring = 'roc_auc', n_jobs = -1, return_train_score=True)
            # Compute test and train sets
            # (band i occupies columns [x_size*i, x_size*(i+1)) unless the
            # BAitaSig per-band connection counts dictate uneven slices)
            if n_BAitaSig == None:
                X_tr = X[tr_idx, x_size*i:x_size*(i+1)]
                X_te = X[te_idx, x_size*i:x_size*(i+1)]
            else:
                if x_size == sum(n_BAitaSig):
                    X_tr = X[tr_idx, :]
                    X_te = X[te_idx, :]
                else:
                    n_temp = [0]
                    n_temp.extend(np.cumsum(n_BAitaSig))
                    X_tr = X[tr_idx, n_temp[i]:n_temp[i+1]]
                    X_te = X[te_idx, n_temp[i]:n_temp[i+1]]
            # Standardize
            # (scaler is fit on the training fold only, then applied to both)
            scaler_out = preprocessing.StandardScaler().fit(X_tr)
            X_tr = scaler_out.transform(X_tr)
            X_te = scaler_out.transform(X_te)
            # Fit data and save auc scores
            fit = clf.fit(X_tr, y_tr)
            auc[freq_bands[i]][count] = fit.score(X_te, y_te)
            # Make parameter plot
            #plot_grid_search(clf.cv_results_, 'score', parameters[freq_bands[i]], 'log($\lambda$) ' + freq_bands[i])
            cv_results = clf.cv_results_
            metric = 'score'
            grid_param_1 = parameters[freq_bands[i]]
            scores_mean = cv_results[('mean_test_' + metric)]
            # scores_sd = cv_results[('std_test_' + metric)]
            scores_mean_tr = cv_results[('mean_train_' + metric)]
            # Set plot style
            #plt.style.use('seaborn')
            # Plot Grid search scores
            sns.set(font_scale=1.5)
            df1 = pd.DataFrame({'log($\lambda$)':[math.log(i) for i in grid_param_1], 'CV Average AUC' : scores_mean_tr, 'type' : ['train']*len(scores_mean_tr)})
            df2 = pd.DataFrame({'log($\lambda$)':[math.log(i) for i in grid_param_1], 'CV Average AUC' : scores_mean, 'type' : ['test']*len(scores_mean_tr)})
            sns.lineplot(x = 'log($\lambda$)', y = 'CV Average AUC', style='type', legend = False, markers = "o", data = df1, ax = ax[count_plt//3][count_plt%3])
            sns.lineplot(x = 'log($\lambda$)', y = 'CV Average AUC', style='type', legend = False, markers = "o", data = df2, ax = ax[count_plt//3][count_plt%3])
            ax[count_plt//3][count_plt%3].set_xlabel('log($\lambda$)', fontsize=14)
            ax[count_plt//3][count_plt%3].set_ylabel('CV Average AUC' , fontsize=14)
            #pprint(clf.cv_results_)
            #pdb.set_trace() # Type "exit" to get out, type "c" to continue
            count_plt += 1
            # Without permutations, record which coefficients the best lasso
            # kept (indices and weights) for later feature analysis.
            if len(perms) == 1:
                coef_idx = np.nonzero(fit.best_estimator_.coef_)
                nz_coef_idx[freq_bands[i]].append(coef_idx)
                nz_coef_val[freq_bands[i]].append(fit.best_estimator_.coef_[coef_idx])
    return auc, nz_coef_idx, nz_coef_val, count
##############################################################################
def CV_classifier(X, y, n_scz_te, reps, separate_bands, perms, dir_save,
                  classifiers, parameters, n_BAitaSig = None):
    """
    Parameters
    ----------
    X : np.array
        Matrix with dimension (subjects)x(feature vector).
    y : np.array
        Vector with classifications (0: healthy, 1: schizo).
    n_scz_te : int
        Desired number of schizophrenic patients in each test set.
    reps : range(*)
        Range with desired number (*) of extra times the code should run.
    separate_bands : boolean
        True = seperate data into frequency bands. False = don't separate.
    perms : range(*)
        Range with desired number (*) of permutations.
        *=1 indicates no permutations.
    dir_save : string
        Directory path to where the results should be saved.
    classifiers : dictionary
        Dictionary containing classifiers. E.g. {'lasso' : Lasso(max_iter = 10000)}
    parameters : dictionary
        Dictionary containing parameters to the classifiers as in "classifiers"
    Notes
    -------
    Saves three different values in the dir_save path:
    auc : dictionary
        Contains the auc-scores for each loop, either divided into bands or
        with the key "all".
    nz_coef_idx : dictionary
        Contains the non-zero coefficient indices for each loop, either
        divided into bands or with the key "all".
    nz_coef_val : dictionary
        Contains the non-zero coefficient values (the weights) for each
        loop, either divided into bands or with the key "all".
    """
    # NOTE(review): despite the docstring, nothing in this body writes to
    # dir_save — the results only live in the local dicts.  Verify whether
    # the saving step was removed intentionally.
    # Check if data should be seperated into bands or not:
    if separate_bands:
        freq_bands = ['delta', 'theta', 'alpha', 'beta1', 'beta2', 'gamma']
    else:
        freq_bands = ['all']
    # With permutations, keep the original labels so each permutation can
    # reshuffle from the same source; wrap the longer loop in tqdm.
    if len(perms) > 1:
        y_org = y
        tqdm_perms = tqdm(perms)
        tqdm_reps = reps
    else:
        tqdm_perms = perms
        tqdm_reps = tqdm(reps)
    # Initialize space for values
    auc = {}
    nz_coef_idx= {}
    nz_coef_val= {}
    # One AUC slot per (repetition x fold x permutation).
    nb_loops = len(reps)*(sum(y==0)//n_scz_te)*len(perms)
    # Define the size of X
    x_size = int(X.shape[1]/len(freq_bands))
    for i in freq_bands:
        auc[i] = np.zeros(nb_loops) # e.g. auc = {'delta':[] , 'theta': [], 'alpha': [], ....}
        nz_coef_idx[i] = []
        nz_coef_val[i] = []
    count = -1
    for perm in tqdm_perms:
        if len(perms) > 1:
            y = shuffle(y_org, random_state=perm).reset_index(drop=True)
        for rep in tqdm_reps:
            auc, nz_coef_idx, nz_coef_val, count = leaveKout_CV(X, y, n_scz_te, rep,
                                                                perms, classifiers, parameters, count,
                                                                freq_bands, x_size, auc, nz_coef_idx,
                                                                nz_coef_val, n_BAitaSig)
#%%
# --- Experiment configuration ----------------------------------------------
con_type = 'lps'
separate_bands = True  # False = All bands together
partialData = True
atlas = 'BAita'  # DKEgill, BAita, BAitaSig
sns.set(font_scale=1.5)
freq_band_type = 'DiLorenzo'

# Directories
dir_folders = r'/share/FannyMaster/PythonNew/' + atlas + '_timeseries_'
newest_date = getNewestFolderDate(dir_folders)
dir_features = dir_folders + newest_date + '/' + freq_band_type + '/Features'
dir_y_ID = r'/share/FannyMaster/PythonNew/Age_Gender.csv'

n_scz_te = 2
reps = range(1)
classifiers = {'lasso' : Lasso(max_iter = 10000)}
dir_save = dir_folders + newest_date + '/' + freq_band_type + '/classificationResults/' + con_type.capitalize()

X, y = get_Xy(dir_features, dir_y_ID, con_type, partialData)

# Atlas-specific feature selection and regularization grids.
if atlas == 'DKEgill':
    X = getEgillX(X)
    n_BAitaSig = None
    parameters = getEgillParameters(con_type, separate_bands)
elif atlas == 'BAitaSig':
    X, n_BAitaSig = significant_connected_areasBAitaSigX(X)
    parameters = getBAitaSigParameters(con_type, separate_bands)
elif atlas == 'BAita':
    parameters = getBAitaParameters(con_type, separate_bands)
    n_BAitaSig = None

perms = range(1)  # 1 = No permutations
# BUG FIX: n_BAitaSig was computed above but never forwarded, so the
# BAitaSig atlas silently fell back to the default (None) band slicing.
CV_classifier(X, y, n_scz_te, reps, separate_bands, perms, dir_save,
              classifiers, parameters, n_BAitaSig=n_BAitaSig)
|
5,022 | 7d8c2aa5674704d4443034c29bbdc715da9fd567 | """
db.集合.update()
"""
"""
实例 被替换了
> db.test1000.update({'name':'dapeng'},{'name':'大鹏'})
WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 })
> db.test1000.find()
{ "_id" : ObjectId("5c35549d7ad0cf935d3c150d"), "name" : "大鹏" }
{ "_id" : ObjectId("5c3554f37ad0cf935d3c150e"), "nInserted" : 1 }
{ "_id" : ObjectId("5c3555417ad0cf935d3c150f"), "name" : "kongming", "age" : 12 }
{ "_id" : ObjectId("5c3555457ad0cf935d3c1510"), "name" : "kongming1", "age" : 12 }
{ "_id" : ObjectId("5c3555557ad0cf935d3c1511"), "name" : "kongming1", "age" : 12 }
>
"""
"""
实例2 利用$set:只修改匹配到的值
> db.test1000.update({'name':'kongming'},{$set:{'name':'空明被修改'}})
WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 })
> db.test1000.find()
{ "_id" : ObjectId("5c35549d7ad0cf935d3c150d"), "name" : "大鹏" }
{ "_id" : ObjectId("5c3554f37ad0cf935d3c150e"), "nInserted" : 1 }
{ "_id" : ObjectId("5c3555417ad0cf935d3c150f"), "name" : "空明被修改", "age" : 12 }
{ "_id" : ObjectId("5c3555457ad0cf935d3c1510"), "name" : "kongming1", "age" : 12 }
{ "_id" : ObjectId("5c3555557ad0cf935d3c1511"), "name" : "kongming1", "age" : 12 }
>
"""
"""
实例3 修改多条
db.test1000.update({'name':'kongming'},{$set:{'name':'空明被修改'}},{multi:true})
""" |
5,023 | c0c0ed31a09f2b49448bc1f3519aa61daaba20af | import sys
# Appears to be the "rotating queue" problem (e.g. BOJ 1021): count the
# minimum number of rotations needed to pop the requested positions from a
# deque of 1..size — TODO confirm against the problem statement.
input = sys.stdin.readline
from collections import deque
size, num = map(int,input().split())
position = list(map(int,input().split()))
cnt =0
nums = []
for k in range(1,size+1) :
    nums.append(k)
# `size` is rebound from an int to the deque of elements from here on.
size = deque(nums)
position = deque(position)
while position != deque([]) :
    if position[0]==1:
        # Target is at the front: pop it for free and shift the remaining
        # target positions left by one.
        size.popleft()
        position.popleft()
        for i in range(len(position)) :
            position[i] -= 1
    else :
        right = 0
        left = 0
        # Rotate toward whichever side brings the target to the front in
        # fewer moves; each rotation renumbers all pending targets (with
        # wrap-around at the deque boundaries).
        if position[0] <= (len(size)+2)//2 :
            size.rotate(-1)
            cnt +=1
            for i in range(len(position)) :
                position[i] -= 1
                if position[i] <= 0 :
                    position[i] = len(size)
        else :
            size.rotate(1)
            cnt += 1
            for i in range(len(position)) :
                position[i] += 1
                if position[i] > len(size) :
                    position[i] = 1
print(cnt) |
ghj = input("enter your first name:")
print("Welcome to my Quiz:\nIf you go wrong once you lose but if you give all the answers correct then you win but no CHEATING.")

# BUG FIX: the original kept every answer in one shared `winlist`, so e.g.
# answering "mumbai" to question 1 was counted as correct.  Each question
# now checks only its own accepted answers.
x = 0

print("Q1:-Who is the president of India?")
enter = input("enter your answer here:")
if enter.lower() in ("ramnath govind",):
    print("woah you surely are smart you are correct!!!!")
    x = x + 1
else:
    print("you went wrong at the first question")
    x = x - 1

print("Q2:-What is the full form of MCQ?")
enter2 = input("enter your answer here:")
if enter2.lower() in ("multiple choice question", "multiple choice questions"):
    print("you are right!!!!!!")
    x = x + 1
else:
    print("I told you this is a hard quiz, ur answer is wrong")
    x = x - 1

print("Q3:-which city is the india's largest city by population")
enter3 = input("enter ur answer here:")
if enter3.lower() in ("mumbai",):
    print("you are right!!!")
    x = x + 1
else:
    print("you were wrong you lose 1 mark")
    x = x - 1

print("well " + str(ghj) + " you have completed the quiz and scored: " + str(x) + " marks")
5,025 | deff4eb3ae933a99036f39213ceaf2144b682904 | from __future__ import print_function
import re
import sys
from pyspark import SparkContext
# define a regular expression for delimiters
NON_WORDS_DELIMITER = re.compile(r'[^\w\d]+')
def main():
    """Count input lines containing more than 10 words and print a summary."""
    if len(sys.argv) < 2:
        print('''Usage: pyspark q2.py <file>
        e.g. pyspark q2.py file:///home/cloudera/test_file''')
        exit(-1)
    sc = SparkContext(appName="HW4_Q2_LC")
    try:
        def has_many_words(line):
            # A line "has more than 10 words" when splitting on the
            # non-word delimiter yields more than 10 pieces.
            return len(NON_WORDS_DELIMITER.split(line)) > 10

        n = sc.textFile(sys.argv[1]).filter(has_many_words).count()
        banner = "=" * 20
        print(banner)
        print(" R E S U L T S ")
        print("Lines with more than 10 words:", n)
        print(banner)
    finally:
        sc.stop()
if __name__ == '__main__':
main() |
def domain_name(url):
    """Extract the bare domain name from *url*.

    Strips an optional http/https scheme and a leading ``www.`` and
    returns everything up to the first dot, e.g.
    ``domain_name("https://www.cnet.com")`` -> ``"cnet"``.

    Fixes the original, which built the result with replace-by-space
    hacks and a char-by-char scan, and implicitly returned None for any
    input without a dot; a dotless host is now returned unchanged.
    """
    for prefix in ("https://", "http://"):
        if url.startswith(prefix):
            url = url[len(prefix):]
            break
    if url.startswith("www."):
        url = url[len("www."):]
    # Domain ends at the first dot; also cut any path part defensively.
    return url.split("/")[0].split(".")[0]
print(domain_name("https://www.codewars.com/kata/514a024011ea4fb54200004b/train/python"))
|
5,027 | bf3b529f8f06619c94d2dfca283df086466af4ea | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-26 16:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: converts deck.description to a
    # TextField with an empty-string default.

    dependencies = [
        ('api', '0002_auto_20170308_1949'),
    ]

    operations = [
        migrations.AlterField(
            model_name='deck',
            name='description',
            field=models.TextField(default=''),
        ),
    ]
|
5,028 | 9bc15f063adc7d2a5ea81d090736ab6ce66a03d4 | from django.db import models
from django.utils.safestring import mark_safe
from ondoc.authentication.models import TimeStampedModel, CreatedByModel, Image
import datetime
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from ondoc.doctor.models import Doctor, PracticeSpecialization
class ArticleCategory(TimeStampedModel):
    """A named category grouping articles, addressable by a unique URL slug."""
    name = models.CharField(blank=False, null=False, max_length=500)
    identifier = models.CharField(max_length=48, blank=False, null=True)
    url = models.CharField(blank=False, null=True, max_length=500, unique=True)
    title = models.CharField(max_length=500, null=True, blank=True)
    description = models.CharField(max_length=200000, null=True, blank=True)

    def __str__(self):
        return self.name

    class Meta:
        db_table = "article_categories"

    def save(self, *args, **kwargs):
        # Normalize the slug.  url is nullable, so guard against None: the
        # original's hasattr() check is always true on a model instance and
        # .strip() raised AttributeError when url was None.
        if self.url:
            self.url = self.url.strip('/').lower()
        super(ArticleCategory, self).save(*args, **kwargs)
class Article(TimeStampedModel, CreatedByModel):
    """An editorial article (or medicine page) with SEO metadata and links."""
    title = models.CharField(blank=False, null=False, max_length=500, unique=True)
    url = models.CharField(blank=False, null=True, max_length=500, unique=True)
    heading_title = models.CharField(blank=True, null=False, max_length=500)
    body = models.CharField(blank=False, null=False, max_length=200000)
    category = models.ForeignKey(ArticleCategory, null=True, related_name='articles', on_delete=models.SET_NULL)
    header_image = models.ImageField(upload_to='articles/header/images', null=True, blank=True, default='')
    header_image_alt = models.CharField(max_length=512, blank=True, null=True, default='')
    icon = models.ImageField(upload_to='articles/icons', null=True, blank=True, default='')
    is_published = models.BooleanField(default=False, verbose_name='Published')
    description = models.CharField(max_length=500, blank=True, null=True)
    keywords = models.CharField(max_length=256, blank=True, null=True)
    author_name = models.CharField(max_length=256, null=True, blank=True)
    author = models.ForeignKey(Doctor, null=True, blank=True, related_name='published_articles', on_delete=models.SET_NULL)
    published_date = models.DateField(default=datetime.date.today)
    linked_articles = models.ManyToManyField('self', symmetrical=False, through='LinkedArticle',
                                             through_fields=('article', 'linked_article'))
    pharmeasy_url = models.TextField(blank=True, null=True)
    pharmeasy_product_id = models.PositiveIntegerField(null=True, blank=True)
    is_widget_available = models.NullBooleanField()

    def get_absolute_url(self):
        """Return the admin change-page URL for this article."""
        content_type = ContentType.objects.get_for_model(self)
        return reverse('admin:%s_%s_change' % (content_type.app_label, content_type.model), args=[self.id])

    def icon_tag(self):
        """Return an <img> preview tag for the icon (admin use), or ''."""
        if self.icon:
            return mark_safe('<img src="%s" width="150" height="150" />' % (self.icon.url))
        return ""

    def save(self, *args, **kwargs):
        # Backfill published_date and normalize the slug.  url is nullable,
        # so guard against None: the original's hasattr() check never fails
        # on a model instance and .strip() raised AttributeError for
        # url=None.
        self.published_date = self.published_date if self.published_date else datetime.date.today()
        if self.url:
            self.url = self.url.strip('/').lower()
        super().save(*args, **kwargs)

    def __str__(self):
        return self.title

    class Meta:
        db_table = "article"
class ArticleImage(TimeStampedModel, CreatedByModel):
    """An uploaded image belonging to an article."""
    name = models.ImageField(upload_to='article/images')

    def image_tag(self):
        """Return an <img> preview tag for the admin, or '' when unset."""
        if not self.name:
            return ""
        return mark_safe('<img src="%s" width="150" height="150" />' % (self.name.url))

    def __str__(self):
        if not self.name:
            return ""
        return self.name.url

    class Meta:
        db_table = "article_image"
class ArticleContentBox(TimeStampedModel):
    # A named, ranked box used to group linked urls/articles on a page.
    name = models.CharField(max_length=1000)
    title = models.CharField(max_length=1000)
    rank = models.PositiveSmallIntegerField(default=0, blank=True)

    def __str__(self):
        return self.name

    class Meta:
        db_table = 'article_content_box'
class ArticleLinkedUrl(TimeStampedModel):
    # An external url attached to an article, displayed inside a content box.
    article = models.ForeignKey(Article, on_delete=models.CASCADE)
    url = models.CharField(max_length=2000, unique=True)
    title = models.CharField(max_length=500)
    content_box = models.ForeignKey(ArticleContentBox,null=True, on_delete=models.SET_NULL)

    def __str__(self):
        return self.title

    class Meta:
        db_table = 'article_linked_urls'
class LinkedArticle(TimeStampedModel):
    # A directed article-to-article link, grouped under a content box; the
    # (article, linked_article) pair is unique.
    article = models.ForeignKey(Article, on_delete=models.CASCADE, related_name='related_articles')
    linked_article = models.ForeignKey(Article, on_delete=models.CASCADE, related_name='related_article')
    title = models.CharField(max_length=500, null=True, blank=False)
    content_box = models.ForeignKey(ArticleContentBox,null=True, on_delete=models.SET_NULL)

    def __str__(self):
        return "{}-{}".format(self.article.title, self.linked_article.title)

    class Meta:
        db_table = 'linked_articles'
        unique_together = (('article', 'linked_article'),)
class MedicineSpecialization(TimeStampedModel):
    """Maps a medicine article to a practice specialization."""
    medicine = models.ForeignKey(Article, on_delete=models.CASCADE)
    specialization = models.ForeignKey(PracticeSpecialization, on_delete=models.CASCADE, null=True,
                                       blank=True)

    def __str__(self):
        # specialization is nullable; the original dereferenced .name
        # unconditionally and raised AttributeError for NULL rows.
        if self.specialization is None:
            return self.medicine.title
        return self.medicine.title + " " + self.specialization.name

    class Meta:
        db_table = "medicine_specialization"
|
5,029 | cc160b1b0478446ba0daec4a0fe9e63453df3d96 | N = int(input())
A_list = list(map(int,input().split()))
B_list = list(map(int,input().split()))
C_list = list(map(int,input().split()))
ans = 0
for i in range(N):
ans += B_list[A_list[i]-1]
if i < N-1:
if A_list[i]+1==A_list[i+1]:
ans += C_list[A_list[i]-1]
print(ans)
|
5,030 | 4c1fea4dcf143ec976d3956039616963760d5af6 | # add some description here
import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xarray as xr
import pandas as pd
import os
import pickle
from scipy.interpolate import griddata
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import dates
import datetime
import matplotlib
matplotlib.style.use('ggplot')
import sys
sys.path.append('masterThesisPack/')
import masterThesisPack as oceano
# Quality-control plot for the Laje de Santos station wind data: plot the
# raw along-shore wind with +/- 3-sigma thresholds (sigma computed after
# masking values beyond 3 std as NaN).
BASE_DIR = oceano.make_dir()
DATA_DIR = BASE_DIR.replace('github/', 'ventopcse/data/')
SAVE_DIR = BASE_DIR + 'dissertacao/presentation/figures/'
# importing laje de santos data
raw = xr.open_dataset(DATA_DIR+'Est_lajeSantos/lajesantos.nc')
raw = raw.to_dataframe()
# cut only a period
# raw = raw['2015-04':]
data = raw.copy()
treat = data.copy()
# Mask outliers beyond 3 std before estimating the spread.
treat[treat > 3*treat.std()] = np.nan
std = treat.wind_along.std()
fig,ax = plt.subplots()
raw.wind_along.plot(ax=ax)
ax.axhline(y=3*std,c='k',ls='dashed')
ax.axhline(y=-3*std,c='k',ls='dashed')
ax.set_ylabel(r'Vento a 10m de altura [m.s$^{-1}$]')
# plt.savefig(SAVE_DIR.replace('github','gitlab') + 'qualityControl.png',dpi=250)
|
5,031 | 6025b8d4015572ea1a760c1b4bc7200a1019c802 | from can.interfaces.ics_neovi.neovi_bus import NeoViBus
|
5,032 | 0ff8743e54509a76e9a7add4be9da279bdee82a6 | class Solution:
def calculate(self, s: str) -> int:
nums = []
ops = []
def cal():
a = nums.pop()
b = nums.pop()
c = ops.pop()
if c == '+':
nums.append(b + a)
elif c == '-':
nums.append(b - a)
elif c == '*':
nums.append(b * a)
else:
nums.append(int(b / a))
i = 0
while i < len(s):
if s[i] == ' ':
i += 1
continue
elif s[i].isdigit():
t = ''
while i < len(s) and s[i].isdigit():
t += s[i]
i += 1
nums.append(int(t))
elif not ops:
ops.append(s[i])
i += 1
elif s[i] == '+' or s[i] == '-':
while ops:
cal()
ops.append(s[i])
i += 1
else:
while ops and (ops[-1] == '*' or ops[-1] == '/'):
cal()
ops.append(s[i])
i += 1
while ops:
cal()
return nums[-1] |
5,033 | d50618f7784e69b46cb665ec1a9c56f7a2867785 | #n-repeated element
class Solution:
    def repeatedNTimes(self, A):
        """Return the most frequent element of A.

        For the "N-repeated element in size-2N array" problem this is the
        element occurring N times.  Ties resolve to the first-seen value,
        matching the original key/val parallel-list lookup.
        """
        freq = {}
        for value in A:
            # dict.get replaces the original's explicit membership test.
            freq[value] = freq.get(value, 0) + 1
        # max() scans keys in insertion order, so the first-seen maximal
        # element wins -- same result as key[val.index(max(val))].
        return max(freq, key=freq.get)

# Ad-hoc demonstration; runs at import time, as in the original.
# (The captured final line carried a stray '|' token, removed here.)
s = Solution()
l = [2,1,2,5,3,2]
k = [1,1,1,2]
print(s.repeatedNTimes(l))
5,034 | a8f200e0ae1252df4ad6560e5756347cd0e4c8ba | """
Client component of the Quartjes connector. Use the ClientConnector to create
a connection to the Quartjes server.
Usage
-----
Create an instance of this object with the host and port to connect to.
Call the start() method to establish the connection.
Now the database and the stock_exchange variable can be used to communicate
with the server.
If you do not wish to connect to a server, but run a local server instead,
create the object without any arguments.
Example
-------
>>> conn = ClientConnector("192.168.1.1")
>>> conn.start()
>>> conn.database.get_drinks()
Available server methods
------------------------
Currently two server objects are made available upon connection. Please see the
documentation for the server object for available methods and events:
* database: :class:`quartjes.controllers.database.Database`
* stock_exchange: :class:`quartjes.controllers.stock_exchange.StockExchange`
Advanced
--------
Use the method get_service_interface to retrieve additional interfaces to a server side
service.
As long as the connector is running, it will keep trying to reconnect any
lost connections using an exponential back-off.
ClientConnector class
---------------------
"""
__author__ = "Rob van der Most"
__docformat__ = "restructuredtext en"
from quartjes.connector.protocol import QuartjesClientFactory
from twisted.internet import reactor, threads
from threading import Thread
from quartjes.connector.services import ServiceInterface
import quartjes.controllers.database
import quartjes.controllers.stock_exchange2
class ClientConnector(object):
    """
    Client side endpoint of the Quartjes connector.

    Parameters
    ----------
    host : string
        Host to connect to. If no host is specified, a local server is started.
    port : int
        Port to connect to.

    Attributes
    ----------
    host
    port
    factory
    database
    stock_exchange
    """

    def __init__(self, host=None, port=None):
        self._host = host
        if port:
            self._port = port
        else:
            # Fall back to the port advertised by the server module.
            from quartjes.connector.server import default_port
            self._port = default_port
        self._factory = QuartjesClientFactory()
        self._database = None
        self._stock_exchange = None
        self._connection = None

    @property
    def host(self):
        """
        Hostname to connect to.
        Can only be changed when there is no active connection.
        """
        return self._host

    @host.setter
    def host(self, value):
        # Fixed typo in the assertion message ("will" -> "while").
        assert not self.is_connected(), "Host should not be changed while connected."
        self._host = value

    @property
    def port(self):
        """
        Port to connect to.
        Can only be changed when there is no active connection.
        """
        return self._port

    @port.setter
    def port(self, value):
        # Fixed typo in the assertion message ("will" -> "while").
        assert not self.is_connected(), "Port should not be changed while connected."
        self._port = value

    @property
    def factory(self):
        """
        The protocol factory used by the client to connect to the server.
        You normally should not need to access this. It is for advanced options.
        """
        return self._factory

    @property
    def database(self):
        """
        Reference to the currently running
        :class:`Database <quartjes.controllers.database.Database>`.
        This can be a proxy to the database on the server or a local database.
        """
        return self._database

    @property
    def stock_exchange(self):
        """
        Reference to the currently running
        :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`.
        This can be a proxy to the stock exchange on the server or a local stock exchange.
        """
        return self._stock_exchange

    def start(self):
        """
        Start the connector and create a connection to the server. Starts a
        reactor loop in a separate thread.
        """
        if not self._host:
            # No host: run everything in-process instead of over the wire.
            print("No host selected, starting local instance.")
            self._database = quartjes.controllers.database.default_database()
            self._stock_exchange = quartjes.controllers.stock_exchange2.StockExchange2()
        else:
            reactor.callLater(0, self._connect) #@UndefinedVariable
            if not reactor.running: #@UndefinedVariable
                self._reactor_thread = ClientConnector._ReactorThread()
                self._reactor_thread.start()
            # Block until the factory reports an established connection.
            self._factory.wait_for_connection()
            self._database = self.get_service_interface("database")
            self._stock_exchange = self.get_service_interface("stock_exchange")

    def stop(self):
        """
        Stop the connector, closing the connection.
        The Reactor loop remains active as the reactor cannot be restarted.
        """
        if self._host:
            #threads.blockingCallFromThread(reactor, self._factory.stopTrying)
            threads.blockingCallFromThread(reactor, self._disconnect)
        else:
            self._database = None
            self._stock_exchange.stop()
            self._stock_exchange = None

    def get_service_interface(self, service_name):
        """
        Construct a service interface for the service with the given name. Use
        the service interface to send requests to the corresponding service
        on the Quartjes server.

        Parameters
        ----------
        service_name : string
            Name of the service on the server to which you want a remote
            interface.

        Returns
        -------
        service_interface : :class:`quartjes.connector.services.ServiceInterface`
            An interface to the service.
            Please note that the existence of the service on the server is not
            verified until an actual method call has been done.
        """
        return ServiceInterface(self._factory, service_name)

    def is_connected(self):
        """
        Determine whether the connection to the server is active.
        A local service is also considered connected.

        Returns
        -------
        connected : boolean
            True if connected, False if not.
        """
        if not self._host:
            # Local mode: "connected" means the local database exists.
            if self._database:
                return True
            else:
                return False
        else:
            return self._factory.is_connected()

    def _connect(self):
        """
        Internal method called from the reactor to start a new connection.
        """
        #print("Connecting...")
        self._connection = reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable

    def _disconnect(self):
        """
        Internal method called from the reactor to shut down a connection.
        """
        self._factory.stopTrying()
        self._connection.disconnect()

    class _ReactorThread(Thread):
        """
        Thread for running the reactor loop. This thread runs as a daemon, so
        if the main thread and any non daemon threads end, the reactor also
        stops running allowing the application to exit.
        """
        def __init__(self):
            Thread.__init__(self, name="ReactorThread")
            self.daemon = True

        def run(self):
            reactor.run(installSignalHandlers=0) #@UndefinedVariable
def tk_event_listener(F):
    """
    Decorator: make a method safe to call from outside the TK mainloop.

    Instead of executing immediately (possibly on a foreign thread), the
    call is recorded on the instance's ``_event_queue`` and later replayed
    inside the mainloop by ``tk_prepare_instance_for_events``.
    """
    def queued_call(self, *args, **kwargs):
        # Defer: capture the target, receiver and arguments for replay.
        self._event_queue.put((F, self, args, kwargs))
    return queued_call
def tk_prepare_instance_for_events(instance):
    """
    Prepare a class to receive events from outside the tk mainloop.
    Call this from the TK mainloop before any events are going to be received.
    Decorate methods to call using tk_event_listener

    Installs a queue on the instance and a 100 ms polling callback (via
    instance.after) that drains queued (method, self, args, kwargs)
    tuples and invokes them inside the mainloop.
    """
    def listener():
        # Drain everything queued by tk_event_listener wrappers, then
        # re-arm the poll for 100 ms later.
        try:
            while 1:
                (method, self, pargs, kwargs) = instance._event_queue.get_nowait()
                method(self, *pargs, **kwargs)
        except Queue.Empty:
            pass
        instance.after(100, listener)
    # NOTE(review): Python 2 module name; under Python 3 this would be
    # `import queue` -- confirm the target interpreter before porting.
    import Queue
    instance._event_queue = Queue.Queue()
    instance.after(100, listener)
|
5,035 | fe01b78d29dc456f7a537dd5639bc658fc184e36 | from collections import defaultdict, namedtuple
from color import RGB, clamp
import math
import controls_model as controls
from eyes import Eye, MutableEye
from geom import ALL
#from icicles.ice_geom import ALL
def load_geometry(mapfile):
    """
    Load sheep neighbor geometry.

    Each useful line has the form ``<panel> <edge-csv>[ <vertex-csv>]``
    where the csv parts are comma-separated ints.

    Returns a map { panel: (edge-neighbors, vertex-neighbors or None), ... }
    """
    def useful(line):
        # Keep lines that are neither blank nor '#' comments.
        return len(line) != 0 and not line.startswith('#')

    def ints(csv):
        return [int(tok) for tok in csv.split(',')]

    def parse_neighbors(raw):
        "returns a tuple ([a,a,a], [b,b,b] or None) given a raw string"
        raw = raw.strip()
        if ' ' not in raw:
            return (ints(raw), None)
        edge_part, vertex_part = raw.split()
        return (ints(edge_part), ints(vertex_part))

    geometry = {}
    with open(mapfile, 'r') as f:
        for stripped in (line.strip() for line in f.readlines()):
            if not useful(stripped):
                continue
            panel, rest = stripped.split(' ', 1)
            geometry[int(panel)] = parse_neighbors(rest.strip())
    return geometry
# Panel adjacency loaded once at import time; keys are panel ids, values
# are (edge-neighbor ids, vertex-neighbor ids or None) -- see load_geometry.
_neighbor_map = load_geometry('data/geom.txt')
def edge_neighbors(panel):
    "Return the list of panel ids that share an edge with a given panel"
    # Any failure (non-integer panel, unknown id, missing map) degrades
    # to an empty neighbour list rather than propagating.
    try:
        panel = int(panel)
        out = _neighbor_map[panel][0]
        if out is None:
            return []
        return out
    except Exception:
        # Was `except Exception, e:` -- Python 2-only syntax, and the
        # bound exception was never used.  This form works on 2.6+/3.x.
        return []
def vertex_neighbors(panel):
    "Return the list of panel ids that share a vertex (but not an edge) with a given panel"
    # Mirrors edge_neighbors but reads the second tuple slot; all
    # failures degrade to an empty list.
    try:
        panel = int(panel)
        out = _neighbor_map[panel][1]
        if out is None:
            return []
        return out
    except Exception:
        # Was `except Exception, e:` -- Python 2-only syntax, and the
        # bound exception was never used.  This form works on 2.6+/3.x.
        return []
##
## Convenience wrapper to pass around three separate sheep objects
##
# Bundle of the five addressable views: both sides at once, each side
# alone, and the two eyes.
SheepSides = namedtuple('SheepSides', ['both', 'party', 'business', 'party_eye', 'business_eye'])
def make_sheep(model):
    """Build the standard SheepSides bundle for *model*: three panel
    views ('a' = both, 'p' = party, 'b' = business) plus the two eyes."""
    panel_views = {side: Sheep(model, side) for side in ('a', 'p', 'b')}
    return SheepSides(both=panel_views['a'],
                      party=panel_views['p'],
                      business=panel_views['b'],
                      party_eye=Eye(model, 'p'),
                      business_eye=Eye(model, 'b'))
def make_eyes_only_sheep(sides):
    """Return a copy of *sides* whose panel views are inert NullSheep
    stand-ins, keeping only the real eye objects active."""
    inert = NullSheep()
    return SheepSides(both=inert,
                      party=inert,
                      business=inert,
                      party_eye=sides.party_eye,
                      business_eye=sides.business_eye)
def make_mutable_sheep(sides):
    """Wrap every view in *sides* with its mutable (mutable/muteable)
    counterpart so individual views can be muted at runtime."""
    wrapped_both = MutableSheep(sides.both)
    wrapped_party = MutableSheep(sides.party)
    wrapped_business = MutableSheep(sides.business)
    return SheepSides(
        both=wrapped_both,
        party=wrapped_party,
        business=wrapped_business,
        party_eye=MutableEye(sides.party_eye),
        business_eye=MutableEye(sides.business_eye),
    )
##
## Sheep class to represent one or both sides of the sheep
##
# 'a' = both sides at once, 'b' = business side, 'p' = party side
# (see Sheep._resolve for how 'a' fans out to two panels).
VALID_SIDES=set(['a', 'b', 'p'])
# Fixed 12-colour palette cycled across panels by Sheep.set_test_colors().
TEST_COLORS = [
    RGB(141,211,199),RGB(255,255,179),RGB(190,186,218),RGB(251,128,114),RGB(128,177,211),RGB(253,180,98),RGB(179,222,105),RGB(252,205,229),RGB(217,217,217),RGB(188,128,189),RGB(204,235,197),RGB(255,237,111)
]
class Sheep(object):
    """One addressable view of the sheep panels.

    side 'b' = business, 'p' = party, 'a' = both sides at once (each
    logical cell fans out to the 'b' and 'p' panel of the same number).
    """

    def __init__(self, model, side):
        self.model = model
        if side not in VALID_SIDES:
            # Bug fix: the original raised a bare "%s" template without
            # ever interpolating the offending value into the message.
            raise Exception("%s is not a valid side. use one of a,b,p" % side)
        self.side = side
        self.cells = set(ALL)
        self.cm = None
        self.handle_colorized = False
        self._brightness = 1.0

    def __repr__(self):
        return "Sheep(%s, side='%s')" % (self.model, self.side)

    def set_brightness(self, val):
        # Scales the V channel of every colour written from now on.
        self._brightness = val

    def all_cells(self):
        "Return the list of valid cell IDs"
        return ALL

    # handle setting both sides here to keep the commands sent
    # to the simulator as close as possible to the actual hardware
    def _resolve(self, cell):
        """
        Translate an integer cell id into a model cell identifier
        'a' will be translated into two cells
        """
        if cell in self.cells:
            if self.side == 'a':
                return [str(cell)+'b', str(cell)+'p']
            else:
                return [str(cell) + self.side]
        else:
            return []

    def set_cell(self, cell, color):
        if isinstance(cell, list):
            return self.set_cells(cell, color)
        # a single set_cell call may result in two panels being set
        c = self._resolve(cell)
        if not c:
            return
        if self.handle_colorized and self.cm:
            color = color.colorize(self.cm.colorized)
        if self._brightness < 1.0:
            # Copy before dimming so the caller's colour is untouched.
            color = color.copy()
            color.v = color.v * self._brightness
        # print "setting", c
        self.model.set_cells(c, color)

    def set_cells(self, cells, color):
        if cells is None:
            return
        resolved = []
        for c in cells:
            if isinstance(c, list):
                for cb in c:
                    resolved.extend(self._resolve(cb))
            else:
                resolved.extend(self._resolve(c))
        if self.handle_colorized and self.cm:
            color = color.colorize(self.cm.colorized)
        if self._brightness < 1.0:
            # Copy before dimming so the caller's colour is untouched.
            color = color.copy()
            color.v = color.v * self._brightness
        # print "setting", resolved
        self.model.set_cells(resolved, color)

    def set_all_cells(self, color):
        self.set_cells(ALL, color)

    def clear(self):
        "Blank every panel on this side."
        self.set_all_cells(RGB(0,0,0))
        # AAck! Never call go like this. Let the main loop
        # handle the timing!!! :(
        # self.go()

    def go(self):
        self.model.go()

    # convenience methods in case you only have a sheep object
    def edge_neighbors(self, cell):
        return edge_neighbors(cell)

    def vertex_neighbors(self, cell):
        return vertex_neighbors(cell)

    def set_test_colors(self):
        # Cycle the fixed palette across all panels.
        ix = 0
        for p in ALL:
            self.set_cell(p, TEST_COLORS[ix])
            ix += 1
            if ix == len(TEST_COLORS):
                ix = 0
class NullSheep(object):
    """
    Do-nothing stand-in for a Sheep side.  A show may mutate it freely
    (so it runs without crashing) while only eye modifications take
    effect; neighbour queries still delegate to the module helpers.
    """

    def all_cells(self):
        return ALL

    def edge_neighbors(self, cell):
        return edge_neighbors(cell)

    def vertex_neighbors(self, cell):
        return vertex_neighbors(cell)

    def set_cell(self, cell, color):
        return None  # intentionally ignored

    def set_cells(self, cells, color):
        return None  # intentionally ignored

    def set_all_cells(self, color):
        return None  # intentionally ignored

    def clear(self):
        return None  # intentionally ignored

    def go(self):
        return None  # intentionally ignored

    def set_test_colors(self):
        return None  # intentionally ignored
class MutableSheep(object):
    """
    Sheep-side wrapper with a mute switch: while ``muted`` is True all
    mutating calls are swallowed (like NullSheep); otherwise they are
    forwarded to the wrapped parent.  Query methods always pass through.
    """

    def __init__(self, parent):
        self.parent = parent
        self.muted = False

    def set_cell(self, cell, color):
        if not self.muted:
            self.parent.set_cell(cell, color)

    def set_cells(self, cells, color):
        if not self.muted:
            self.parent.set_cells(cells, color)

    def set_all_cells(self, color):
        if not self.muted:
            self.parent.set_all_cells(color)

    def clear(self):
        if not self.muted:
            self.parent.clear()

    def go(self):
        if not self.muted:
            self.parent.go()

    def set_test_colors(self):
        # Deliberately not subject to muting (matches original behaviour).
        self.parent.set_test_colors()

    def all_cells(self):
        return self.parent.all_cells()

    def edge_neighbors(self, cell):
        return self.parent.edge_neighbors(cell)

    def vertex_neighbors(self, cell):
        return self.parent.vertex_neighbors(cell)
|
5,036 | beb536b6d8883daaa7e41da03145dd98aa223cbf | from room import Room
from player import Player
from item import Item
# Declare all the rooms
# Room(name, description); descriptions are shown to the player.
room = {
    'outside':  Room("Outside Cave Entrance",
                     "North of you, the cave mount beckons"),
    'foyer':    Room("Foyer", """Dim light filters in from the south. Dusty
passages run north and east."""),
    'overlook': Room("Grand Overlook", """A steep cliff appears before you, falling
into the darkness. Ahead to the north, a light flickers in
the distance, but there is no way across the chasm."""),
    'narrow':   Room("Narrow Passage", """The narrow passage bends here from west
to north. The smell of gold permeates the air."""),
    'treasure': Room("Treasure Chamber", """You've found the long-lost treasure
chamber! Sadly, it has already been completely emptied by
earlier adventurers. The only exit is to the south."""),
}

# Link rooms together
# Attributes n_to/s_to/e_to/w_to are the exits the main loop navigates.
room['outside'].n_to = room['foyer']
room['foyer'].s_to = room['outside']
room['foyer'].n_to = room['overlook']
room['foyer'].e_to = room['narrow']
room['overlook'].s_to = room['foyer']
room['narrow'].w_to = room['foyer']
room['narrow'].n_to = room['treasure']
room['treasure'].s_to = room['narrow']

# list of items
# Item(name, description) -- keyed by the word the player types.
itemList = {
    'Brick': Item('Brick', 'Build settlement and roads'),
    'Wood': Item('Wood', 'Build settlement and roads'),
    'Sheep': Item('Sheep', 'Build settlement and get development cards'),
    'Grain': Item('Grain', 'Build settlement, cities and get development cards'),
    'Stone': Item('Stone', 'Build cities and get development cards'),
    'DCard': Item('Development Cards', 'Get special powers')
}

# items assignment to rooms
room['outside'].items = [itemList['Brick'], itemList['Wood']]
room['foyer'].items = [itemList['Brick'], itemList['Grain'], itemList['Sheep']]
room['overlook'].items = [itemList['Wood'], itemList['Sheep'], itemList['Grain']]
room['narrow'].items = [itemList['Stone'], itemList['Grain']]
room['treasure'].items = [itemList['Brick'], itemList['Grain'], itemList['Wood']]
#
# Main
#
# Make a new player object that is currently in the 'outside' room.
# Write a loop that:
#
# * Prints the current room name
# * Prints the current description (the textwrap module might be useful here).
# * Waits for user input and decides what to do.
#
# If the user enters a cardinal direction, attempt to move to the room there.
# Print an error message if the movement isn't allowed.
#
# If the user enters "q", quit the game.
# Get Player Name
playerName = input('Hello, What is your name?\n')
# Initialize player with given name
player = Player(playerName, room['outside'])
# Game loop: print state, read a command, dispatch on word count.
while True:
    # print player item inventory
    print('\nPlayer Items:')
    for item in player.items:
        print('\t', item)
    # print current room and items available in the room
    print('Room - ', player.current_room)
    print('Items in Room:')
    for item in player.current_room.items:
        print('\t', item)
    # Get the User Input
    userInput = input('What would you like to do? \n\tEnter [n], [s], [e] or [w] to move across rooms \n\tEnter \"take [item_name]\" or \"drop [item_name]\" to add or remove items \n\tEnter [q] to quit the game\n')
    userInputWords = userInput.split(' ')
    if userInput == 'q':
        print('You chose to quit!')
        break
    elif len(userInputWords) == 1:
        # Single word: treated as a movement direction.
        # NOTE(review): Player.move presumably rejects bad directions -- confirm.
        player.move(userInput)
    elif len(userInputWords) == 2:
        # Two words: "<verb> <item>", e.g. "take Brick" / "drop Wood".
        verb = userInputWords[0]
        itemName = userInputWords[1]
        if itemName in itemList:
            player.action(verb, itemList[itemName])
        else:
            print('Invalid item choice')
|
5,037 | 222a02f97df5ded6fea49e9eb201ed784a2a2423 | #
#
#
##
from __future__ import print_function, unicode_literals
import inspect
import os
import pprint as pp
import time
from time import gmtime, strftime
import subprocess
from local import *
from slurm import *
class Job_status( object ):
    """ Enumerate class for job statuses, this is done differently in python 3
    """
    # Terminal outcomes
    FINISHED = 1
    FAILED = 2       # failed; may be resubmitted (see Manager.resubmit_job)
    NO_RESTART = 3   # failed and must not be restarted
    # Live states
    RUNNING = 4
    QUEUEING = 5
    RESUBMITTED = 6
    SUBMITTED = 7
    # Bookkeeping states
    CREATED = 98
    KILLED = 99
    UNKNOWN = 100
class Job(object):
    """ This class is presenting a singular job and all information associated with it.
    """

    def __init__(self, cmd, step_name, output=None, limit=None, delete_file=None, thread_id=None):
        """ Create a job object
        Args:
          cmd (str): command to run
          step_name (str): name of the step that this command belongs to
          output (str): output information to pass on to the next job
          limit (str): paramters to pass on to the backend
          delete_file (str): File(s) to delete if the job is successful
          thread_id (int): id of the thread running this
        Returns:
          job (obj)
        """
        self.status = Job_status.CREATED
        self.active = True
        self.command = None
        self.backend = None
        self.output = output
        self.step_name = step_name
        self.pre_task_ids = None
        # Bug fix: limit and thread_id were only assigned when a value was
        # passed in, so accessing job.limit / job.thread_id on a default-
        # constructed job raised AttributeError.  Assign unconditionally;
        # the observable values for non-None arguments are unchanged.
        self.limit = limit
        self.delete_file = delete_file
        self.thread_id = thread_id
        self.job_id = None
        self.backend_id = None
        self.nr_of_tries = 0
        self.cmd = cmd
        self.max_memory = None
        self.cputime = None

    def __getitem__(self, item):
        """ Generic getter function
        Raises:
          AttributeError is raised if trying to access value starting with _ or unknown value
        """
        if ( item.startswith("_")):
            raise AttributeError
        # getattr already raises AttributeError for unknown names; the
        # original's `except KeyError` wrapper was unreachable dead code.
        return getattr(self, item)

    def __setitem__(self, item, value):
        """ Generic setter function
        Raises:
          AttributeError is raised if trying to set a value starting with _
        """
        if ( item.startswith("_")):
            raise AttributeError
        return setattr(self, item, value)

    def __repr__(self):
        return "{name} -> {status}".format( name=self.step_name, status=self.status )

    def __str__(self):
        return "{name}".format( name=self.step_name )

    def delete_tmp_files(self):
        """ deletes tmp files listed in self.delete_file (if they exist)
        Args:
          None
        Returns:
          boolean: True, also when there was nothing to delete
        Raises:
          None
        """
        if self.delete_file is None:
            return True
        if ( isinstance(self.delete_file, str)):
            # Normalise a single filename to a one-element list.
            self.delete_file = [ self.delete_file ]
        for file_name in self.delete_file:
            print( file_name)
            if ( os.path.isfile( file_name )):
                os.remove( file_name )
        return True
class Thread( object):
    """Lightweight record pairing a thread name with its numeric id.

    Supports dict-style access (obj["name"]) mirroring attribute access,
    while refusing to expose underscore-prefixed internals.
    """

    def __init__( self, name, thread_id ):
        self.name = name
        self.thread_id = thread_id

    def __getitem__(self, item):
        """Attribute read via subscript; underscore names raise
        AttributeError, as do unknown attributes (via getattr)."""
        if item.startswith("_"):
            raise AttributeError
        return getattr(self, item)

    def __setitem__(self, item, value):
        """Attribute write via subscript; underscore names raise
        AttributeError."""
        if item.startswith("_"):
            raise AttributeError
        return setattr(self, item, value)
class Manager( object ):
    """Tracks jobs and threads for a pipeline and mediates between the
    pipeline and the execution backends (local or cluster)."""

    def __init__(self, pipeline):
        """ Creates a manager object
        """
        self._jobs = []
        self._active_jobs = []
        self._threads = []
        self._thread_index = {}
        self._thread_id = 1
        self.local_backend = Local()
        self.backend = None
        self.pipeline = pipeline

    def __getitem__(self, item):
        """ Generic getter function
        Raises:
          AttributeError is raised if trying to access value starting with _ or unknown value
        """
        if ( item.startswith("_")):
            raise AttributeError
        # getattr already raises AttributeError for unknown names; the
        # original's `except KeyError` wrapper was unreachable dead code.
        return getattr(self, item)

    def __setitem__(self, item, value):
        """ Generic setter function
        Raises:
          AttributeError is raised if trying to set a value starting with _
        """
        if ( item.startswith("_")):
            raise AttributeError
        return setattr(self, item, value)

    def add_thread(self, name):
        """ Create a new thread object for the manager
        Args:
          name (str): name of the thread
        Returns:
          None
        """
        thread = Thread( name=name, thread_id=self._thread_id)
        self._threads.append( thread )
        self._thread_index[ name ] = self._thread_id
        self._thread_id += 1

    def get_thread_by_name( self, name):
        """ gets a thread object based on name
        Args:
          name (str): name of the thread
        Returns:
          thread (obj)
        Raises:
          raises an assert error if the thread does not exist
        """
        assert name in self._thread_index, "No thread named {}".format( name )
        return self._threads[ self._thread_index[ name ]]

    def submit_job(self, cmd, step_name, output=None, limit=None, delete_file=None, thread_id=None, system_call=False):
        """ Submits a job using the selected backend, setting up the tracking and all that jazz
        Args:
          cmd (str): command to run
          step_name (str): name of the step that this command belongs to
          output (str): output information to pass on to the next job
          limit (str): paramters to pass on to the backend
          delete_file (str): File(s) to delete if the job is successful
          thread_id (int): id of the thread running this
          system_call (bool): run the job as a system job (default: false )
        Returns:
          None
        """
        job = Job(cmd, step_name, output, limit, delete_file, thread_id)
        self._jobs.append( job )
        # The job id is simply its position in the tracking list.
        job.job_id = len( self._jobs) - 1
        if ( system_call ) :
            job = self.local_backend.system_call( job )
        else:
            job = self.backend.submit( job )

    def resubmit_job(self, job):
        """ resubmits a job
        Args:
          job (job): job to resubmit
        """
        job.nr_of_tries += 1
        job.status = Job_status.RESUBMITTED
        job = self.backend.submit( job )

    def killall(self):
        """kills all submitted/running jobs
        """
        # Bug fix: the original iterated the nonexistent `self.jobs` with
        # tuple unpacking and called an unqualified `backend` -- it raised
        # on first use.  Kill every tracked job via the active backend.
        for job in self._jobs:
            self.backend.kill( job )

    def job_outputs( self, step_name=None):
        """ Collect the outputs of all jobs belonging to steps preceding step_name.
        Args:
          step_name (str): name of the step to collect outputs from
        Returns:
          list of outputs
        """
        outputs = []
        prev_steps = self.pipeline._workflow.prev_steps( step_name )
        for job in self._jobs:
            if job.step_name in prev_steps:
                outputs.append( job.output )
        return outputs

    def format_memory(self, memory):
        """ Format memory into a more readable format
        Args:
          memory (int): will be cast to float
        Returns
          Readable memory (str); "N/A" for None or zero
        """
        # Bug fix: the None check used to come *after* float(memory), so
        # format_memory(None) raised TypeError instead of returning "N/A".
        if memory is None:
            return "N/A"
        memory = float( memory)
        if memory == 0:
            return "N/A"
        elif ( memory > 1000000000):
            return "{:.2f}GB".format(memory / 1000000000)
        elif ( memory > 1000000):
            return "{:.2f}MB".format(memory / 1000000)
        elif ( memory > 1000):
            return "{:.2f}KB".format(memory / 1000)
        else:
            return "{:}".format(int(memory))

    def format_time( self, seconds):
        """ Makes seconds into a more readable format eg: 10:03:01
        Args
          Seconds (int): seconds to convert into hours:mins:seconds
        returns:
          time (str); "N/A" for None
        """
        if seconds is None:
            return "N/A"
        seconds = int( seconds )
        hours = int(seconds / 3600)
        seconds -= hours * 3600
        minutes = int(seconds / 60)
        seconds -= minutes * 60
        seconds = int(seconds )
        return "{:02}:{:02}:{:02}".format( hours, minutes, seconds)

    def report(self):
        """ print the current progress
        Args:
          None
        Returns:
          None
        """
        job_summary = {}
        for job in self._jobs:
            if job.step_name not in job_summary:
                # First job for this step: initialise all counters.
                job_summary[ job.step_name ] = {}
                job_summary[ job.step_name ][ 'DONE' ] = 0
                job_summary[ job.step_name ][ 'RUNNING' ] = 0
                job_summary[ job.step_name ][ 'QUEUING' ] = 0
                job_summary[ job.step_name ][ 'FAILED' ] = 0
                job_summary[ job.step_name ][ 'UNKNOWN' ] = 0
                job_summary[ job.step_name ][ 'max_mem' ] = 0
                job_summary[ job.step_name ][ 'cputime' ] = 0
            if job.status == Job_status.FINISHED:
                job_summary[ job.step_name ][ 'DONE' ] += 1
                if job.cputime is not None:
                    job_summary[ job.step_name ]['cputime'] += int(job.cputime)
                if job.max_memory is not None and job.max_memory > job_summary[ job.step_name ][ 'max_mem']:
                    job_summary[ job.step_name ][ 'max_mem'] = int(job.max_memory)
            elif job.status == Job_status.RUNNING:
                job_summary[ job.step_name ][ 'RUNNING' ] += 1
            elif job.status == Job_status.QUEUEING or job.status == Job_status.SUBMITTED:
                job_summary[ job.step_name ][ 'QUEUING' ] += 1
            elif job.status == Job_status.FAILED or job.status == Job_status.NO_RESTART:
                job_summary[ job.step_name ][ 'FAILED' ] += 1
            else:
                job_summary[ job.step_name ][ 'UNKNOWN' ] += 1
        local_time = strftime("%d/%m/%Y %H:%M", time.localtime())
        pickle_file = "{}.{}".format(self.pipeline.project_name, self.pipeline._pid)
        print("[{} @{} {}]".format( local_time,self.pipeline._hostname , pickle_file))
        print("{:20} || {:12} || {:12} || {:2s} {:2s} {:2s} {:2s} {:2s}".format("Run stats", "Runtime", "Max Mem", "D","R","Q","F","U"))
        # Report steps in workflow order; skip steps with no jobs yet.
        for step in sorted(self.pipeline._workflow._analysis_order, key=self.pipeline._workflow._analysis_order.__getitem__):
            if step not in job_summary:
                continue
            print("{:20} || {:12} || {:12} || {:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(step,
                   self.format_time(job_summary[ step ]['cputime']),
                   self.format_memory(job_summary[ step ]['max_mem']),
                   job_summary[ step ][ 'DONE' ],
                   job_summary[ step ][ 'RUNNING' ],
                   job_summary[ step ][ 'QUEUING' ],
                   job_summary[ step ][ 'FAILED' ],
                   job_summary[ step ][ 'UNKNOWN' ]))

    def active_jobs(self):
        """ updates the status of and returns all active jobs
        Args:
          None
        Returns:
          list of jobs (obj)
        """
        active_jobs = []
        for job in self._jobs:
            if job.active:
                # Refresh the job's status from its backend before reporting.
                job.backend.status( job )
                active_jobs.append( job )
        self._active_jobs = active_jobs[:]
        return active_jobs

    def waiting_for_job(self, depends_on ):
        """ check if any of the running jobs are in the depends list
        Args:
          depends_on (list obj): list of steps to check against
        Returns:
          boolean, True if outstanding dependencies
        """
        # This code is aweful, but I don't have to time and brain
        # power to fix it right now
        for depend_on in depends_on:
            for active_job in self._active_jobs:
                if (active_job.active and
                    depend_on.name == active_job.step_name ):
                    return True
        for depend_on in depends_on:
            job_found = False
            for job in self._jobs:
                if (depend_on.name == job.step_name ):
                    job_found = True
            if not job_found:
                # NOTE(review): `job` here is the last job iterated above
                # (or unbound when _jobs is empty) -- the message may name
                # the wrong step.  Preserved as-is; confirm before fixing.
                print("{} is waiting to start and finish {}".format( job.step_name, depend_on.name ))
                return True
        # We are not waiting for any active or steps yet to be performed
        return False

    def failed_dependency_jobs(self, depends_on ):
        """ check if any of the running jobs this one depends on have failed.
        Args:
          depends_on (list obj): list of steps to check against
        Returns:
          boolean, True if one or more job has failed and cannot be restarted
        """
        # NOTE(review): the inner check ignores depend_on, so any active
        # NO_RESTART job triggers this.  Preserved as-is.
        for depend_on in depends_on:
            for active_job in self._active_jobs:
                if (active_job.status == Job_status.NO_RESTART):
                    print("dependecy {} failed".format(active_job.step_name))
                    return True
        return False

    def _next_id(self):
        ''' generates and returns the next job id from the class
        Returns:
          Next available job id (int)
        '''
        # Bug fix: defined without `self` yet used self.job_id, so any
        # call raised TypeError.  NOTE(review): Manager never initialises
        # job_id; confirm the intended counter before relying on this.
        self.job_id += 1
        return self.job_id
|
5,038 | 0cef70b8d661fe01ef4a1eda83a21e1186419a0d | # coding: utf-8
"""
Created on Mon Oct 29 12:57:40 2018
@authors Jzhu, Lrasmy , Xin128 @ DeguiZhi Lab - UTHealth SBMI
Last updated Feb 20 2020
"""
#general utilities
from __future__ import print_function, division
from tabulate import tabulate
import numpy as np
import random
import matplotlib.pyplot as plt
try:
import cPickle as pickle
except:
import pickle
import warnings
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
warnings.filterwarnings("ignore")
plt.ion()
#torch libraries
import torch
from torch.utils.data import Dataset, DataLoader
use_cuda = torch.cuda.is_available()
#use_cuda=False
# Dataset class loaded from pickles
class EHRdataFromPickles(Dataset):
def __init__(self, root_dir, file = None, transform=None, sort = True, model='RNN', test_ratio = 0, valid_ratio = 0):
"""
Args:
1) root_dir (string): Path to pickled file(s).
The directory contains the directory to file(s): specify 'file'
please create separate instances from this object if your data is split into train, validation and test files.
2) data should have the format: pickled, 4 layer of lists, a single patient's history should look at this (use .__getitem__(someindex, seeDescription = True))
[310062,
0,
[[[0],[7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]],
[[66], [590, 596, 153, 8, 30, 11, 10, 240, 20, 175, 190, 15, 7, 5, 183, 62]],
[[455],[120, 30, 364, 153, 370, 797, 8, 11, 5, 169, 167, 7, 240, 190, 172, 205, 124, 15]]]]
where 310062: patient id,
0: no heart failure
[0]: visit time indicator (first one), [7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]: visit codes.
3)transform (optional): Optional transform to be applied on a sample. Data augmentation related.
4)test_ratio, valid_ratio: ratios for splitting the data if needed.
"""
self.file = None
if file != None:
self.file = file
self.data = pickle.load(open(root_dir + file, 'rb'), encoding='bytes')
if sort:
self.data.sort(key=lambda pt:len(pt[2]),reverse=True)
self.test_ratio = test_ratio
self.valid_ratio = valid_ratio
else:
print('No file specified')
self.root_dir = root_dir
self.transform = transform
def __splitdata__(self, sort = True):
random.seed(3)
random.shuffle(self.data)
dataSize = len(self.data)
nTest = int(self.test_ratio * dataSize)
nValid = int(self.valid_ratio * dataSize)
test= self.data[:nTest]
valid = self.data[nTest:nTest+nValid]
train = self.data[nTest+nValid:]
if sort:
#sort train, validation and test again
test.sort(key=lambda pt:len(pt[2]),reverse=True)
valid.sort(key=lambda pt:len(pt[2]),reverse=True)
train.sort(key=lambda pt:len(pt[2]),reverse=True)
return train, test, valid
    def __getitem__(self, idx, seeDescription = False):
        '''
        Return the patient data of index: idx of a 4-layer list
        patient_id (pt_sk);
        label: 0 for no, 1 for yes;
        visit_time: int indicator of the time elapsed from the previous visit, so first visit_time for each patient is always [0];
        visit_codes: codes for each visit.
        '''
        if self.file != None:
            sample = self.data[idx]
        else:
            print('No file specified')
            # NOTE(review): `sample` is never bound on this path, so the code
            # below raises UnboundLocalError after printing -- consider raising
            # an explicit exception instead.
        if self.transform:
            sample = self.transform(sample)
        # NOTE(review): np.asarray on ragged visit lists may build an object
        # array (or raise on newer NumPy) -- confirm visits are uniform.
        vistc = np.asarray(sample[2])
        desc = {'patient_id': sample[0], 'label': sample[1], 'visit_time': vistc[:,0],'visit_codes':vistc[:,1]}
        if seeDescription:
            '''
            if this is True:
            You will get a descriptipn of what each part of data stands for
            '''
            # Pretty-print the four record fields as an org-mode table.
            print(tabulate([['patient_id', desc['patient_id']], ['label', desc['label']],
                            ['visit_time', desc['visit_time']], ['visit_codes', desc['visit_codes']]],
                           headers=['data_description', 'data'], tablefmt='orgtbl'))
        #print('\n Raw sample of index :', str(idx))
        return sample
    def __len__(self):
        '''
        just the length of data
        '''
        if self.file != None:
            return len(self.data)
        else:
            print('No file specified')
            # NOTE(review): implicitly returns None here, which makes
            # len(dataset) raise TypeError; consider raising instead.
# Dataset class from already loaded pickled lists
class EHRdataFromLoadedPickles(Dataset):
    """Dataset wrapper around an already-unpickled list of patient records.

    Each record is a 4-layer list:
        [patient_id, label, [[[visit_time], [visit_code, ...]], ...]]
    e.g. [310062, 0, [[[0], [7, 364, ...]], [[66], [590, ...]], ...]]
    where label is 0/1 (e.g. heart failure) and the first visit_time is
    always [0] (time elapsed since the previous visit).
    """
    def __init__(self, loaded_list, transform=None, sort = True, model='RNN'):
        # NOTE(review): `model` is accepted for API compatibility but unused.
        self.data = loaded_list
        if sort:
            # Longest patient histories first (helps batch packing downstream).
            self.data.sort(key=lambda pt: len(pt[2]), reverse=True)
        self.transform = transform
    def __getitem__(self, idx, seeDescription = False):
        """Return the raw (optionally transformed) record at *idx*; when
        *seeDescription* is True, pretty-print a field-by-field view first."""
        sample = self.data[idx]
        if self.transform:
            sample = self.transform(sample)
        vistc = np.asarray(sample[2])
        desc = {'patient_id': sample[0], 'label': sample[1],
                'visit_time': vistc[:, 0], 'visit_codes': vistc[:, 1]}
        if seeDescription:
            # Tabulated, human-readable view of the four record fields.
            rows = [['patient_id', desc['patient_id']],
                    ['label', desc['label']],
                    ['visit_time', desc['visit_time']],
                    ['visit_codes', desc['visit_codes']]]
            print(tabulate(rows, headers=['data_description', 'data'], tablefmt='orgtbl'))
        return sample
    def __len__(self):
        """Number of patient records."""
        return len(self.data)
def preprocess(batch,pack_pad,surv): ### LR Sep 30 20 added surv_m
    """Collate a minibatch of EHR records into padded tensors.

    Returns (mb_t, lbt_t, seq_l, mtd):
      mb_t:  LongTensor of visit-code sequences, zero-padded to the batch's
             max visit count and max codes-per-visit;
      lbt_t: stacked label tensor (shape depends on *surv*);
      seq_l: true visit counts per patient;
      mtd:   zero-padded per-visit time-offset tensors.
    """
    # Check cuda availability
    if use_cuda:
        flt_typ=torch.cuda.FloatTensor
        lnt_typ=torch.cuda.LongTensor
    else:
        lnt_typ=torch.LongTensor
        flt_typ=torch.FloatTensor
    mb=[]
    mtd=[]
    lbt=[]
    seq_l=[]
    bsize=len(batch) ## number of patients in minibatch
    lp= len(max(batch, key=lambda xmb: len(xmb[-1]))[-1]) ## maximum number of visits per patients in minibatch
    llv=0
    for x in batch:
        lv= len(max(x[-1], key=lambda xmb: len(xmb[1]))[1])
        if llv < lv:
            llv=lv # max number of codes per visit in minibatch
    for pt in batch:
        sk,label,ehr_seq_l = pt
        lpx=len(ehr_seq_l) ## no of visits in pt record
        seq_l.append(lpx)
        # Survival mode stores labels as flat vectors; otherwise as [[float]].
        if surv: lbt.append(Variable(flt_typ([label])))### LR Sep 30 20 added surv_m
        else: lbt.append(Variable(flt_typ([[float(label)]])))
        ehr_seq_tl=[]
        time_dim=[]
        for ehr_seq in ehr_seq_l:
            # Right-pad this visit's code list to llv codes with 0.
            pd=(0, (llv -len(ehr_seq[1])))
            result = F.pad(torch.from_numpy(np.asarray(ehr_seq[1],dtype=int)).type(lnt_typ),pd,"constant", 0)
            ehr_seq_tl.append(result)
            time_dim.append(Variable(torch.from_numpy(np.asarray(ehr_seq[0],dtype=int)).type(flt_typ)))
        ehr_seq_t= Variable(torch.stack(ehr_seq_tl,0))
        lpp= lp-lpx ## diffence between max seq in minibatch and cnt of patient visits
        if pack_pad:
            zp= nn.ZeroPad2d((0,0,0,lpp)) ## (0,0,0,lpp) when use the pack padded seq and (0,0,lpp,0) otherwise.
        else:
            zp= nn.ZeroPad2d((0,0,lpp,0))
        ehr_seq_t= zp(ehr_seq_t) ## zero pad the visits med codes
        mb.append(ehr_seq_t)
        time_dim_v= Variable(torch.stack(time_dim,0))
        time_dim_pv= zp(time_dim_v) ## zero pad the visits time diff codes
        mtd.append(time_dim_pv)
    lbt_t= Variable(torch.stack(lbt,0))
    mb_t= Variable(torch.stack(mb,0))
    if use_cuda:
        # NOTE(review): .cuda() returns a copy and the result is discarded, so
        # these two calls are no-ops; the tensors are already CUDA-typed above.
        mb_t.cuda()
        lbt_t.cuda()
    return mb_t, lbt_t,seq_l, mtd
def preprocess_multilabel(batch,pack_pad): ### LR Feb 18 21 for multi-label
    """Collate a minibatch for multi-label training.

    Same padding logic as preprocess(), but labels are always stored as flat
    float vectors (one multi-hot vector per patient).
    Returns (mb_t, lbt_t, seq_l, mtd).
    """
    # Check cuda availability
    if use_cuda:
        flt_typ=torch.cuda.FloatTensor
        lnt_typ=torch.cuda.LongTensor
    else:
        lnt_typ=torch.LongTensor
        flt_typ=torch.FloatTensor
    mb=[]
    mtd=[]
    lbt=[]
    seq_l=[]
    bsize=len(batch) ## number of patients in minibatch
    lp= len(max(batch, key=lambda xmb: len(xmb[-1]))[-1]) ## maximum number of visits per patients in minibatch
    llv=0
    for x in batch:
        lv= len(max(x[-1], key=lambda xmb: len(xmb[1]))[1])
        if llv < lv:
            llv=lv # max number of codes per visit in minibatch
    for pt in batch:
        sk,label,ehr_seq_l = pt
        lpx=len(ehr_seq_l) ## no of visits in pt record
        seq_l.append(lpx)
        lbt.append(Variable(flt_typ([label])))### LR Sep 30 20 added surv_m
        ehr_seq_tl=[]
        time_dim=[]
        for ehr_seq in ehr_seq_l:
            # Right-pad this visit's code list to llv codes with 0.
            pd=(0, (llv -len(ehr_seq[1])))
            result = F.pad(torch.from_numpy(np.asarray(ehr_seq[1],dtype=int)).type(lnt_typ),pd,"constant", 0)
            ehr_seq_tl.append(result)
            time_dim.append(Variable(torch.from_numpy(np.asarray(ehr_seq[0],dtype=int)).type(flt_typ)))
        ehr_seq_t= Variable(torch.stack(ehr_seq_tl,0))
        lpp= lp-lpx ## diffence between max seq in minibatch and cnt of patient visits
        if pack_pad:
            zp= nn.ZeroPad2d((0,0,0,lpp)) ## (0,0,0,lpp) when use the pack padded seq and (0,0,lpp,0) otherwise.
        else:
            zp= nn.ZeroPad2d((0,0,lpp,0))
        ehr_seq_t= zp(ehr_seq_t) ## zero pad the visits med codes
        mb.append(ehr_seq_t)
        time_dim_v= Variable(torch.stack(time_dim,0))
        time_dim_pv= zp(time_dim_v) ## zero pad the visits time diff codes
        mtd.append(time_dim_pv)
    lbt_t= Variable(torch.stack(lbt,0))
    mb_t= Variable(torch.stack(mb,0))
    if use_cuda:
        # NOTE(review): .cuda() results are discarded (no-ops); the tensors
        # are already CUDA-typed via flt_typ/lnt_typ above.
        mb_t.cuda()
        lbt_t.cuda()
    return mb_t, lbt_t,seq_l, mtd
#customized parts for EHRdataloader
def my_collate(batch):
    """Dispatch minibatch collation to the multi-label or standard
    preprocessor, driven by the module-level flags set by EHRdataloader."""
    if multilabel_m:
        collated = preprocess_multilabel(batch, pack_pad)
    else:
        collated = preprocess(batch, pack_pad, surv_m)
    mb_t, lbt_t, seq_l, mtd = collated
    return [mb_t, lbt_t, seq_l, mtd]
def iter_batch2(iterable, samplesize):
    """Take the first *samplesize* items from *iterable* and return them in
    random order.

    Propagates StopIteration (unchanged from the original) when the iterable
    yields fewer than *samplesize* items.
    """
    results = []
    iterator = iter(iterable)
    # Fill in the first samplesize elements; next() is the idiomatic spelling
    # of the original iterator.__next__() call.
    for _ in range(samplesize):
        results.append(next(iterator))
    random.shuffle(results)
    return results
class EHRdataloader(DataLoader):
    """DataLoader that collates EHR records via my_collate/preprocess*.

    NOTE(review): configuration reaches the collate functions through
    module-level globals (pack_pad, surv_m, multilabel_m), so creating several
    loaders with different settings makes them all share the last-set flags.
    """
    def __init__(self, dataset, batch_size=128, shuffle=False, sampler=None, batch_sampler=None,
                 num_workers=0, collate_fn=my_collate, pin_memory=False, drop_last=False,
                 timeout=0, worker_init_fn=None, packPadMode = False , surv=False,multilbl=False): ### LR Sep 30 20 added surv
        # NOTE(review): the superclass is initialised with hard-coded
        # shuffle=False, sampler=None, num_workers=0, collate_fn=my_collate,
        # etc. -- the matching constructor arguments above are accepted but
        # effectively ignored.
        DataLoader.__init__(self, dataset, batch_size=batch_size, shuffle=False, sampler=None, batch_sampler=None,
                            num_workers=0, collate_fn=my_collate, pin_memory=False, drop_last=False,
                            timeout=0, worker_init_fn=None)
        self.collate_fn = collate_fn
        # Publish collate configuration for my_collate via module globals.
        global pack_pad
        global surv_m ### LR Sep 30 20 added surv_m
        global multilabel_m
        pack_pad = packPadMode
        surv_m=surv ### LR Sep 30 20 added surv_m
        multilabel_m=multilbl
        if multilabel_m : print('multilabel data processing')
########END of main contents of EHRDataloader############
|
5,039 | c2e9a93861080be616b6d833a9343f1a2f018a0b | def presses(phrase):
    """Count the multi-tap keypad presses needed to type *phrase*.

    Each key string lists its characters in press order, so a character's
    cost is its index on the key plus one.  Matching is case-insensitive;
    characters that appear on no key contribute nothing.
    """
    keyboard = [
        '1',
        'ABC2',
        'DEF3',
        'GHI4',
        'JKL5',
        'MNO6',
        'PQRS7',
        'TUV8',
        'WXYZ9',
        '*',
        ' 0',
        '#'
    ]
    amount = 0
    for lttr in phrase.upper():
        for key in keyboard:
            try:
                # 0-based position on the key; presses = position + 1
                i = key.index(lttr)
                i += 1
                amount += i
            except ValueError:
                # lttr is not on this key -- try the next one
                pass
    return amount |
5,040 | 866ec11f6fe13fb2283709128376080afc7493bf | from datetime import datetime
import httplib2
from apiclient.discovery import build
from flask_login import UserMixin
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from oauth2client.client import OAuth2Credentials
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.types import ARRAY
from app import app
db = SQLAlchemy(app)
migrate = Migrate(app, db)
class User(db.Model, UserMixin):
    """Application user with Gmail OAuth credentials and synced mail threads."""
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.Text)
    history_id = db.Column(db.Integer)      # Gmail history marker (not yet maintained; see TODOs below)
    customer_label_id = db.Column(db.Text)  # Gmail label id of the 'Growth' label
    credentials_json = db.Column(JSONB)     # serialized OAuth2Credentials
    threads = db.relationship('Thread', backref='user', lazy='dynamic')
    def __repr__(self):
        return '<User {}>'.format(self.email)
    @property
    def credentials(self):
        # Deserialize the stored OAuth2 credentials; None when not authorized.
        if self.credentials_json:
            return OAuth2Credentials.from_json(self.credentials_json)
        else:
            return None
    @credentials.setter
    def credentials(self, cred):
        # Accept either a live OAuth2Credentials object or its JSON form.
        if type(cred) is OAuth2Credentials:
            self.credentials_json = cred.to_json()
        else:
            self.credentials_json = cred
    @property
    def gmail(self):
        # Fresh authorized Gmail API client (built on every property access).
        http = self.credentials.authorize(httplib2.Http())
        return build('gmail', 'v1', http=http)
    def sync_inbox(self):
        """Import every message of the user's 'Growth'-labelled threads.

        Raises Exception when the account has no 'Growth' label.
        NOTE(review): assumes the Subject/From/To headers are always present
        (IndexError otherwise) and fetches each thread's messages twice
        (threads().get + messages().get) -- confirm both before reuse.
        """
        labels = self.gmail.users().labels().list(userId='me').execute()['labels']
        if len([label for label in labels if label['name'] == 'Growth']) == 0:
            raise Exception('No Growth label found')
        for label in labels:
            if label['name'] == 'Growth':
                self.customer_label_id = label['id']
        db.session.add(self)
        db.session.commit()
        next_page_token = None
        while True:
            thread_result = self.gmail.users().threads().list(userId='me', labelIds=self.customer_label_id, pageToken=next_page_token).execute()
            for thread in thread_result['threads']:
                for message in self.gmail.users().threads().get(userId='me', id=thread['id']).execute()['messages']:
                    data = self.gmail.users().messages().get(userId='me', id=message['id'], format='metadata').execute()
                    msg = Message(
                        gmail_id=data['id'],
                        internal_date=datetime.fromtimestamp(int(data['internalDate']) / 1e3),
                        snippet=data['snippet'],
                        subject=[x for x in data['payload']['headers'] if x['name'] == 'Subject'][0]['value'],
                        sender=[x for x in data['payload']['headers'] if x['name'] == 'From'][0]['value'],
                        recipient=[x for x in data['payload']['headers'] if x['name'] == 'To'][0]['value'],
                    )
                    # Rebinds the loop name `thread` to the DB row -- confusing
                    # but harmless, the outer loop re-assigns on next iteration.
                    thread = Thread.query.filter_by(gmail_id=data['threadId']).first()
                    if not thread:
                        thread = Thread(gmail_id=data['threadId'], user_id=self.id,)
                    msg.thread = thread
                    db.session.add(msg)
                    db.session.add(thread)
            if thread_result.get('nextPageToken'):
                next_page_token = thread_result['nextPageToken']
            else:
                # Single commit once all pages have been imported.
                db.session.commit()
                break
        # pull history_id
        # save latest
        # setup notifications
class Message(db.Model):
    """Metadata for a single Gmail message, belonging to one Thread."""
    id = db.Column(db.Integer, primary_key=True)
    gmail_id = db.Column(db.Text)  # Gmail message id
    internal_date = db.Column(db.DateTime, nullable=False)  # from Gmail internalDate (ms epoch)
    snippet = db.Column(db.Text)
    sender = db.Column(db.Text)     # 'From' header
    recipient = db.Column(db.Text)  # 'To' header
    cc = db.Column(db.Text)
    bcc = db.Column(db.Text)
    subject = db.Column(db.Text)
    thread_id = db.Column(db.Integer, db.ForeignKey('thread.id'), nullable=False)
class Thread(db.Model):
    """A Gmail conversation thread owned by a User."""
    id = db.Column(db.Integer, primary_key=True)
    gmail_id = db.Column(db.Text)  # Gmail thread id
    snippet = db.Column(db.Text)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    messages = db.relationship('Message', backref='thread', lazy='dynamic')
|
5,041 | 1c668cf6f145b85a09b248fefda46e928de64e41 | from django.shortcuts import render
from rest_framework import status, viewsets , response
from . import models
from . import serializers
# Create your views here.
class TodoViewset(viewsets.ModelViewSet):
    """Full CRUD REST endpoint for Todo objects
    (list / retrieve / create / update / destroy)."""
    queryset = models.Todo.objects.all()
    serializer_class = serializers.TodoSerializer
5,042 | d7ff5bf5d8f397500fcac30b73f469316c908f15 | def divisible_by(numbers, divisor):
    """Return the elements of *numbers* that divide evenly by *divisor*.

    Raises ZeroDivisionError when *divisor* is 0.
    """
    res = []
    for e in numbers:
        if e % divisor == 0:
            res.append(e)
    return res
|
5,043 | 67385d6d58cc79037660be546d41ea9ba1f790fa | from datetime import date
def solution(mon: int, day: int) -> str:
    """Return the three-letter uppercase weekday name ('MON', 'TUE', ...)
    for the given month/day in the year 2016."""
    weekday = date(2016, mon, day).strftime("%a")
    return weekday.upper()
|
5,044 | f1ca3d7ff7efcf500f1a16e415b13c47fd08688d | # 30_Days_Of_Code
# Day 2
# Boolean
# Demonstrate Python's two boolean literals by printing each one.
print(True)
print(False)
|
5,045 | 6d0b9523668bd0b302fdbc196d3d7ff25be10b23 | def clear_firefox_driver_session(firefox_driver):
    """Wipe cookies, localStorage and sessionStorage for the current origin.

    NOTE(review): the storage-clearing scripts require the browser to be on a
    real page (a set location); otherwise Firefox raises a WebDriver error --
    see the LocationNotSet exception defined below.
    """
    firefox_driver.delete_all_cookies()
    # Note this only works if the browser is set to a location.
    firefox_driver.execute_script('window.localStorage.clear();')
    firefox_driver.execute_script('window.sessionStorage.clear();')
class LocationNotSet(Exception):
    """Raised when the browser has no page loaded, so web storage
    cannot be cleared."""
|
5,046 | d36552cc589b03008dc9edab8d7e4a003e26bd21 | from __future__ import print_function
import tensorflow as tf
# from keras.callbacks import ModelCheckpoint
from data import load_train_data
from utils import *
import os
create_paths()
log_file = open(global_path + "logs/log_file.txt", 'a')
X_train, y_train = load_train_data()
# First nb_labeled samples form the labelled pool; the rest are unlabelled.
labeled_index = np.arange(0, nb_labeled)
unlabeled_index = np.arange(nb_labeled, len(X_train))
model = get_unet(dropout=True)
if os.path.exists(initial_weights_path):
    model.load_weights(initial_weights_path)
if initial_train:
    model_checkpoint = tf.keras.callbacks.ModelCheckpoint(initial_weights_path, monitor='loss', save_best_only=True)
    if apply_augmentation:
        # One epoch per fit_generator call so the model/log are saved per epoch.
        # NOTE(review): fit_generator is deprecated in TF2 (model.fit accepts
        # generators), and steps_per_epoch=len(labeled_index) with batch_size=32
        # runs ~32x the dataset per epoch -- confirm this is intended.
        for initial_epoch in range(0, nb_initial_epochs):
            history = model.fit_generator(
                data_generator().flow(X_train[labeled_index], y_train[labeled_index], batch_size=32, shuffle=True),
                steps_per_epoch=len(labeled_index), epochs=1, verbose=1, callbacks=[model_checkpoint]
            )
            model.save(initial_weights_path)
            log(history, initial_epoch, log_file)
    else:
        history = model.fit(X_train[labeled_index], y_train[labeled_index], batch_size=32, epochs=nb_initial_epochs, verbose=1, shuffle=True, callbacks=[model_checkpoint])
        log(history, 0, log_file)
else:
    model.load_weights(initial_weights_path)
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(final_weights_path, monitor='loss', save_best_only=True)
# Active-learning loop: grow the labelled pool and fine-tune each iteration.
for iteration in range(1, nb_iterations+1):
    if iteration == 1:
        weights = initial_weights_path
    else:
        weights = final_weights_path
    X_labeled_train, y_labeled_train, labeled_index, unlabeled_index = compute_train_sets(X_train, y_train, labeled_index, unlabeled_index, weights, iteration)
    history = model.fit(X_labeled_train, y_labeled_train, batch_size=32, epochs=nb_active_epochs, verbose=1, shuffle=True, callbacks=[model_checkpoint])
    log(history, iteration, log_file)
    model.save(global_path + "models/active_model" + str(iteration) + ".h5")
log_file.close() |
5,047 | e1a2b33a1ec7aca21a157895d8c7c5b5f29ff49c | #!/usr/bin/python3
"""
Requests username and tasks from JSON Placeholder
based on userid (which is sys.argv[1])
"""
import json
import requests
import sys
if __name__ == "__main__":
    url = "https://jsonplaceholder.typicode.com"
    # User id comes from argv[1]; the script silently does nothing when absent.
    if len(sys.argv) > 1:
        user_id = sys.argv[1]
        name = requests.get("{}/users/{}".format(
            url, user_id)).json().get("name")
        r = requests.get("{}/todos?userId={}".format(
            url, user_id)).json()
        tasks_completed = []
        for task in r:
            if task.get("completed") is True:
                tasks_completed.append(task)
        # Summary line, then one tab-indented title per completed task.
        print("Employee {} is done with tasks({:d}/{:d}):".format(
            name, len(tasks_completed), len(r)))
        if len(tasks_completed) > 0:
            for task in tasks_completed:
                print("\t {}".format(task.get("title")))
5,048 | de819a72ab659b50620fad2296027cb9f4d3e4c0 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import platform
import subprocess
# try to import json module, if got an error use simplejson instead of json.
try:
import json
except ImportError:
import simplejson as json
# if your server uses fqdn, you can suppress the domain, just change the bellow variable to your domain.
my_domain = 'localdomain'
# checks if operating system is Linux.
if platform.system() == 'Linux':
    # subprocess funciton, pass a operation system command as k variable.
    # NOTE(review): written for Python 2 -- under Python 3 communicate()[0]
    # is bytes and later str concatenation (e.g. + " MB") would fail.
    def SubprocessPopen(k):
        """Run shell command *k*, return its stripped stdout; stderr is
        discarded.  shell=True is acceptable only because every command in
        this script is a fixed string -- never pass user input through it."""
        devnull = open(os.devnull, 'w')
        proc = subprocess.Popen([k], stdout=subprocess.PIPE, shell=True, stderr=devnull)
        x = proc.communicate()[0]
        return x.strip()
    # display hostname
    def display_hostname():
        # Strip the configured domain and any dots, lower-case the result.
        x = platform.node()
        return x.replace(my_domain, '').replace('.', '').lower()
    # in my case the first 3 letters of the hostname indicates the site location, change if you want.
    def display_site():
        sites = ('SNE', 'RJO', 'BFC')
        x = platform.node()
        site = x.upper()[:3]
        if site in sites:
            return site
        else:
            return ''
    # display operation system release.
    def display_release():
        k = "lsb_release -d | awk -F':' '{{print $2}}'"
        return (SubprocessPopen(k.strip()))
    # display the hardware serial number.
    def display_hw_serialnumber():
        k = "dmidecode -s system-serial-number | egrep -v '^#'"
        return (SubprocessPopen(k.strip()))
    # display hardware vendor.
    def display_hw_vendor():
        k = "dmidecode -s system-manufacturer | egrep -v '^#'"
        return (SubprocessPopen(k.strip()))
    # display hardware model.
    def display_hw_model():
        k = "dmidecode -s system-product-name | egrep -v '^#'"
        return SubprocessPopen(k.strip())
    # display fibre channel id wwpn.
    def display_fc_wwpn():
        k = "cat /sys/class/fc_host/host*/port_name|xargs"
        return SubprocessPopen(k.strip().replace('0x', ''))
    # display ipv4 address.
    def display_ipaddr():
        k = "ip addr show | egrep inet | awk '{{print $2}}' | awk -F'/' '{{print $1}}' | egrep -v '^127|::'|xargs"
        return SubprocessPopen(k.strip())
    # display EMC storage id.
    def display_frame():
        # NOTE(review): the egrep pattern '^[A-Z]+{2}[0-9]|[0-9]' looks
        # malformed ('+{2}' is an odd quantifier pair) -- verify against real
        # powermt output before trusting this filter.
        k = "powermt display ports | awk '{{print $1}}' | egrep '^[A-Z]+{2}[0-9]|[0-9]' | sort -u|xargs"
        return SubprocessPopen(k.strip())
    # display total memory in MB.
    def display_memory():
        k = "egrep MemTotal /proc/meminfo | awk -F':' '{{print $2}}' | awk '{{print int($1/1024)}}'"
        return SubprocessPopen(k) + " MB"
    # display cpu info, physical and cores.
    def display_cpu():
        k = "model=$(lscpu | egrep ^'Model name' | awk -F\: '{{print$2}}')\n" \
            "socket=$(lscpu | egrep ^'Socket' | awk -F\: '{{print$2}}')\n" \
            "cpu=$(lscpu | egrep ^'CPU\(' | awk -F\: '{{print$2}}')\n" \
            "core=$(lscpu | egrep ^'Core' | awk -F\: '{{print$2}}')\n" \
            "echo ""$model / $socket Socket\\(s\\) / $cpu CPU\\(s\\) / $core Core\\(s\\) per Socket"""
        return SubprocessPopen(k)
    # display information about Veritas InforScale and Cluster Server.
    def display_cluster():
        k = "/opt/VRTSvcs/bin/haclus -state | awk '{{print $1}}' | tail -n1"
        return SubprocessPopen(k)
    # display the list of cluster nodes.
    def display_clusternodes():
        k = "/opt/VRTSvcs/bin/hasys -list"
        return SubprocessPopen(k)
    # display the name of Oracle instances.
    def display_db():
        k = "ps -ef | grep pmon | awk -F\_ '{{print $3}}' | egrep -v '^$|\+ASM'"
        return SubprocessPopen(k)
    # print all information on the screen.
    print(
        "server_name: {0:s} \n"
        "server_release: {1:s} \n"
        "server_site: {2:s} \n"
        "server_vendor: {3:s} \n"
        "server_model: {4:s} \n"
        "server_serial: {5:s} \n"
        "server_cpu: {6:s} \n"
        "server_memory: {7:s} \n"
        "server_ip: {8:s} \n"
        "server_cluster: {9:s} \n"
        "server_clusternodes: {10:s} \n"
        "server_frame: {11:s} \n"
        "server_wwpn: {12:s} \n"
        "server_db: {13:s}".format(display_hostname(), display_release(), display_site(), display_hw_vendor(), display_hw_model(),
                                   display_hw_serialnumber(),
                                   display_cpu(), display_memory(), display_ipaddr(), display_cluster(), display_clusternodes(),
                                   display_frame(),
                                   display_fc_wwpn(), display_db()))
    # create a dict to export info to sqlite db.
    # NOTE(review): every display_* call runs its shell command again, so each
    # hardware probe executes twice (once for the print above, once here).
    hadouken = {'server_name': display_hostname(), 'server_release': display_release(), 'server_site': display_site(),
                'server_vendor': display_hw_vendor(), 'server_model': display_hw_model(),
                'server_serial': display_hw_serialnumber(), 'server_cpu': display_cpu(), 'server_memory': display_memory(),
                'server_ip': display_ipaddr(), 'server_cluster': display_cluster(), 'server_clusternodes': display_clusternodes(),
                'server_frame': display_frame(), 'server_wwpn': display_fc_wwpn(), 'server_db': display_db()}
    # export hadouken info to be loaded into sqlite3 using db.py..
    hadouken_file = '/var/tmp/%s.json' % display_hostname()
    # NOTE(review): fp is never closed/flushed explicitly -- consider `with`.
    fp = open(hadouken_file, 'w')
    json.dump(hadouken, fp)
else:
    # if the operation system is not Linux, sorry.
    print("OS not supported.")
|
5,049 | 8d4ffed90e103e61a85a54d6163770966fb2e5c9 | #!/usr/bin/env python3
"""Test telegram_menu package."""
|
5,050 | 1a730f4a5fa2be434af41a3e320cab8338d93644 | def describe():
    # Print the problem statement for the longest-substring-with-K-distinct demo.
    desc = """
Problem : Given a string, find the length of the longest substring in it with no more than K distinct characters.
For example :
Input: String="araaci", K=2
Output: 4
Explanation: The longest substring with no more than '2' distinct characters is "araa", where the distinct chars are 'a' and 'r'.
-----------
"""
    print(desc)
# Time complexity is O(n)
def find_substr_with_distinct_chars(str, k):
    """Return the longest substring of *str* with at most *k* distinct
    characters (the earliest one on ties); '' for empty input or k <= 0.

    Sliding-window algorithm, O(n) time.
    """
    if len(str) == 0 or k <= 0:
        return ""
    best = ""       # longest qualifying window seen so far
    counts = {}     # char -> occurrences inside the current window
    left = 0        # left edge of the sliding window
    for right, ch in enumerate(str):
        counts[ch] = counts.get(ch, 0) + 1
        # Shrink from the left until at most k distinct chars remain.
        while len(counts) > k:
            dropped = str[left]
            counts[dropped] -= 1
            if not counts[dropped]:
                del counts[dropped]
            left += 1
        if right - left + 1 > len(best):
            best = str[left:right + 1]
    return best
def main():
    """Run the demo: print the problem statement, solve the sample input,
    and report the answer and its length."""
    describe()
    text = "araaci"
    limit = 2
    answer = find_substr_with_distinct_chars(text, limit)
    print("Input", text, limit)
    print("Longest substring with k distinct chars is : ", answer)
    print("Length of longest such substring is : ", len(answer))
main()
|
5,051 | 7455eb670c2c019b8d066fcc6f2878a2136b7fd0 | __author__ = "Prikly Grayp"
__license__ = "MIT"
__version__ = "1.0.0"
__email__ = "priklygrayp@gmail.com"
__status__ = "Development"
from contextlib import closing
class RefrigeratorRaider:
    '''Raid a refrigerator'''
    def open(self):
        """Announce opening the fridge door."""
        print('Open fridge door.')
    def take(self, food):
        """Look for *food*; raise RuntimeError for the unhealthy option."""
        print('Finding {}...'.format(food))
        if food != 'deep fried pizza':
            print('Taking {}'.format(food))
            return
        raise RuntimeError('Health warning!')
    def close(self):
        """Announce closing the fridge door (message typo kept as-is)."""
        print('Close fridg door.')
def raid(food):
    """Open the fridge and try to take *food*; closing() guarantees close()
    is called even when take() raises."""
    with closing(RefrigeratorRaider()) as fridge:
        fridge.open()
        fridge.take(food)
# Demo runs: the second raid raises RuntimeError('Health warning!') inside
# take(); closing() still calls close(), but the exception propagates and
# terminates the script with a traceback.
raid('bacon')
raid('deep fried pizza') |
5,052 | 9cc6700ab14bed9d69d90c1540f6d42186033a19 | from typing import List
def sift_up(heap: List, pos: int = None):
    """Bubble heap[pos] up a max-heap until its parent is not smaller.
    *pos* defaults to the last element (the usual post-append position)."""
    if pos is None:
        pos = len(heap) - 1
    child = pos
    while child > 0:
        parent = (child - 1) // 2
        if heap[child] <= heap[parent]:
            break  # heap property restored
        heap[child], heap[parent] = heap[parent], heap[child]
        child = parent
def sift_down(heap: List, pos: int = 0):
    """Sink heap[pos] down a max-heap until both children are not larger.

    BUG FIX: the original had no exit when heap[pos] was already >= its
    largest child -- *pos* never advanced and the while loop spun forever
    (e.g. sift_down([5, 3]) hung).  The final `return` terminates as soon
    as the heap property holds.
    """
    while pos < len(heap):
        left = pos * 2 + 1
        right = pos * 2 + 2
        if right < len(heap):
            # Two children: pick the larger one.
            max_child = left if heap[left] > heap[right] else right
        elif left < len(heap):
            max_child = left
        else:
            return  # leaf node -- nothing below to compare with
        if heap[pos] < heap[max_child]:
            heap[pos], heap[max_child] = heap[max_child], heap[pos]
            pos = max_child
        else:
            return  # heap property satisfied; without this the loop never ended
def insert(heap: List, number: int):
    """Append *number* to the max-heap and bubble it into place."""
    heap.append(number)
    last = len(heap) - 1
    sift_up(heap, last)
def heapify(array: List):
    """Rearrange *array* in place into a max-heap (bottom-up sift-down).
    Indices beyond the last internal node (and the out-of-range starting
    index len(array)) are harmless no-ops in sift_down."""
    for pos in reversed(range(len(array) + 1)):
        sift_down(array, pos)
def pop(heap: List):
    """Remove and return the maximum (root) of the max-heap.

    Raises IndexError on an empty heap (unchanged behaviour).  The original
    wrapped the root replacement in `if heap:`, but heap[0] has already
    raised by then, so the guard was dead code and is removed.
    """
    root = heap[0]          # IndexError on empty heap, same as before
    heap[0] = heap[-1]      # move the last element to the root...
    heap.pop()
    sift_down(heap)         # ...and sink it back into place
    return root
def make_answer(ops):
    """Generator: for each op string, "insert X" pushes int(X) onto a
    max-heap; a bare op yields the current maximum (via pop)."""
    heap = []
    for line in ops:
        parts = line.split()
        if len(parts) > 1:
            insert(heap, int(parts[1]))
        else:
            yield pop(heap)
if __name__ == "__main__":
    # No CLI driver yet; importing this module just provides the heap primitives.
    pass
|
5,053 | 3a878c91218dfbf23477ae5b7561e9eecfcd1350 | """
Created on Dec 1, 2014
@author: Ira Fich
"""
import random
from igfig.containers import WeightedList
class Replacer():
    """
    A class that replaces itself with a subclass of itself when you instantiate it
    """
    # Aggregated count of valid endpoints at/below this class; maintained by
    # count_subclass_weights().
    subclass_weight = 0
    def __new__(cls, *args, **kwargs):
        # Weighted random descent through the subclass tree: either delegate
        # construction to a randomly chosen subclass or stop here.
        subs = WeightedList(cls.__subclasses__(), [sub.subclass_weight for sub in cls.__subclasses__()])
        if subs and cls.go_deeper(subs):
            newcls = subs.random_choice()
            return newcls.__new__(newcls, *args, **kwargs)
        #TODO: check for valid_endpoint()
        return super().__new__(cls)
    @classmethod
    def go_deeper(cls, *args, **kwargs):
        """
        should we go deeper or not when we're given the option?
        You probably want to override this. For example:
        return random.randint(0, len(args[0]))
        and usually you'll check cls.valid_endpoint() too
        """
        return True
    @classmethod
    def valid_endpoint(cls):
        """
        is this class a valid point to end our search on?
        Probably want to override this too, as we may in the future
        want to be able to end on non-leaf nodes
        May want to combine this with go_deeper in some way, eventually
        """
        return cls.__subclasses__() == []
    @classmethod
    def get_all_subclasses(cls, filterfn=lambda x:x):
        # Breadth-first walk of the subclass tree (including cls itself),
        # keeping the classes that pass filterfn (default: all of them).
        subs = []
        subs_stack = [cls]
        while subs_stack:
            current = subs_stack.pop(0)
            subs_stack += current.__subclasses__()
            if filterfn(current):
                subs.append(current)
        return subs
    @classmethod
    def count_subclass_weights(top_class):
        """
        call this after you create all the classes in question, but before you create any instances of them
        usually this means put all your related Replacer subclasses in one file, and call this at the end of the file
        """
        # NOTE(review): as a classmethod the first parameter (named top_class)
        # receives the class itself, i.e. call as SomeClass.count_subclass_weights().
        # Walk leaves-first so parents can sum their children's weights.
        for cls in reversed(top_class.get_all_subclasses()):
            cls.subclass_weight = 0 #reset everything in case we've called this function before
            if cls.valid_endpoint():
                cls.subclass_weight += 1
            for subclass in cls.__subclasses__():
                cls.subclass_weight += subclass.subclass_weight
        return {cls: cls.subclass_weight for cls in top_class.get_all_subclasses()}
class UniqueReplacer(Replacer):
    """
    variant of Replacer that doesn't permit the same subclass to be selected more than once in a given context
    """
    # NOTE(review): the uniqueness behaviour described above is not implemented
    # yet; this is currently identical to Replacer.
    pass
class TentativeAssignment(object): #TODO: Rename?
    """
    tentative assignment of keys to values in a constraint-satisfaction problem.
    Currently it's basically a holder for a DFS "string", with the ability to lock in values (with various degrees of lockedness?)
    Later, might get a more treelike structure of dependencies to reduce backtracking.
    Question: aren't there already constraint-satisfaction modules that might do what I want more effectively?
    They might be too limited, though... they all tend to work to find optimal solutions within finite domains, whereas my stuff tends
    to be looking for one of many possible good solutions in a near-infinite domain.
    """
    # NOTE(review): design stub -- no attributes or methods implemented yet.
    pass
if __name__ == "__main__":
    # No demo/driver; the module is import-only for now.
    pass
5,054 | 25a159ca2abf0176135086324ab355d6f5d9fe9e | #!/bin/python3
import sys
from collections import deque
def connectedCell(matrix, n, m):
    """Return the size of the largest 8-connected region of 1-cells in the
    n-row by m-column grid *matrix* (iterative DFS flood fill)."""
    unseen = [[True] * m for _ in range(n)]
    best = 0
    for si in range(n):
        for sj in range(m):
            if not unseen[si][sj]:
                continue
            # Flood-fill from (si, sj); zero-valued starts mark only
            # themselves and contribute size 0.
            size = 0
            stack = deque([(si, sj)])
            while stack:
                ci, cj = stack.pop()
                if 0 <= ci < n and 0 <= cj < m and unseen[ci][cj]:
                    unseen[ci][cj] = False
                    if matrix[ci][cj] == 1:
                        size += 1
                        # Expand to all 8 neighbours (bounds checked on pop).
                        stack.extend((ci + di, cj + dj)
                                     for di in (-1, 0, 1)
                                     for dj in (-1, 0, 1)
                                     if di or dj)
            if size > best:
                best = size
    return best
# if __name__ == "__main__":
#     n = int(input().strip())
#     m = int(input().strip())
#     matrix = []
#     for matrix_i in range(n):
#         matrix_t = [int(matrix_temp) for matrix_temp in input().strip().split(' ')]
#         matrix.append(matrix_t)
#     result = connectedCell(matrix,n,m)
#     print(result)
# Ad-hoc smoke test: a 2x2 all-ones grid -> one region of size 4.
# NOTE(review): [[1]*n]*m aliases the same row object m times; harmless here
# because connectedCell only reads the matrix.
n = 2
m = 2
matrix = [[1]*n]*m
result = connectedCell(matrix,n,m)
print('result = ',result) |
5,055 | 1b741b34649193b64479724670244d258cfbbdfc | import RPi.GPIO as GPIO
import numpy as np
import array
import time
import json
import LED_GPIO as led
import BUTTON_GPIO as btn
import parseJson as gjs
# Python 2 script (bare `print` statements below): polls GPIO buttons and
# drives LEDs until Ctrl-C, then prints per-colour frequency statistics.
rndBtnState = False
interval = .1                 # poll period in seconds
rndbtn = gjs.getJsonRnd()     # pin of the "random mode" toggle button
gpioValues = gjs.getJsonData()
strArray = gpioValues[0]      # colour names
btnArray = gpioValues[1]      # button pins
ledArray = gpioValues[2]      # LED pins
clrArray = gpioValues[3]      # RGB pins
arraySize = len(btnArray)
frequencyArray = [0.0] * arraySize
btnStateArray = [False] * arraySize
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(True)
led.init_LED_pins(ledArray, clrArray)
btn.init_BTN_pins(btnArray, rndbtn)
try:
    while True:
        time.sleep(interval)
        btnStateArray = btn.checkButtons(btnArray, btnStateArray, ledArray)
        rndBtnState = btn.checkRndButton(rndBtnState, rndbtn, btnStateArray)
        if(rndBtnState):
            # Random mode: lights chosen by the LED module, RGB indicator on.
            frequencyArray = led.randomLights(ledArray, frequencyArray)
            led.rnd_RGB_ON(clrArray)
        else:
            led.rnd_RGB_OFF(clrArray)
            led.setLEDs(strArray, btnStateArray, ledArray)
        led.getfrequency(btnStateArray, frequencyArray)
except KeyboardInterrupt:
    print "\n"
    print '%-7s %-7s %-10s' % ('color','occurrences','percent')
    print '--------------------------------------------'
    #testing just how random python's random module is
    index = 0
    total = sum(frequencyArray)
    # print tabulate([strArray, frequencyArray], 'color', ' occurrences')
    # NOTE(review): `s` is unused and `index+=1` is redundant under enumerate.
    for index, occurrences in enumerate(frequencyArray):
        s = strArray[index]
        print '%-7s %12d %-0.2f' % (strArray[index], occurrences, occurrences/total * 100), "%"
        index+=1
    print "\n"
    print "Total : ", total
finally:
    # Always release the GPIO pins, even on unexpected errors.
    GPIO.cleanup()
|
5,056 | 44dee207ffa4f78293484126234a3b606e79915b | #!/usr/bin/env python3
# Written by jack @ nyi
# Licensed under FreeBSD's 3 clause BSD license. see LICENSE
'''This class calls the system's "ping" command and stores the results'''
class sys_ping:
    '''this class is a python wrapper for UNIX system ping command, subclass ping does the work, last stores data from the last sysping.ping'''
    # NOTE(review): `ping` takes no self (call it as sys_ping.ping(...)) and
    # `last` holds class-level attributes, so results are shared globally
    # rather than per-instance.  The parsing below assumes Linux iputils
    # `ping -c` output formatting -- confirm on other platforms.
    def ping(target,count,opts):
        '''conducts a ping, returns the data and populates the "last" subclass'''
        import subprocess
        #check to see if the ping count can be used as number, if not, return with an error code
        try:
            int(count)
        except:
            return -1
        count = str(count)
        indata = ""
        sys_ping.last.opts = opts
        sys_ping.last.host = target
        #actually do a syscall for the ping and output the data to indata. If ping fails to find a host, it returns error status 2, capture the error and return an error message
        try:
            if opts == None:
                indata = subprocess.check_output(["ping","-c",count,target],stderr=subprocess.STDOUT)
            else:
                sys_ping.last.opts = opts
                indata = subprocess.check_output(["ping","-c",count,opts,target],stderr=subprocess.STDOUT)
            #if this works, return a success, which is the default state.
            sys_ping.last.success = True
        except subprocess.CalledProcessError:
            #if ping returns an error code, return a failure, and mark the success flag as false
            sys_ping.last.success = False
            return {-1:"error: ping host unreachable"}
        #strip trailing and leading characters, and split the lines into a list.
        # NOTE(review): str(bytes) + strip("b'") is a fragile way to decode
        # subprocess output -- indata.decode() would be the robust spelling.
        indata = str(indata).strip("b'")
        indata = indata.strip()
        indata = indata.split('\\n')
        #last line is a blank, get rid of it
        indata.pop()
        #next line is the averages, keep splitting until we have a list of the averages.
        avg_line = indata.pop()
        avg_line = avg_line.split()[3]
        avg_line = avg_line.split("/")
        #fill the "last" class with data from the avg_line
        sys_ping.last.min_time = avg_line[0]
        sys_ping.last.avg_time = avg_line[1]
        sys_ping.last.max_time = avg_line[2]
        sys_ping.last.mdev_time = avg_line[3]
        #then comes the summary line split and populate "last" class
        sum_line = indata.pop()
        sum_line = sum_line.split()
        sys_ping.last.sent = sum_line[0]
        sys_ping.last.recieved = sum_line[3]
        sys_ping.last.pct_loss = sum_line[5]
        sys_ping.last.op_time = sum_line[9]
        #this is basicly a spacer line, throw it out as well, and a blank line above it
        indata.pop()
        indata.pop()
        #after this is the result of the ping packets. fill a sequnce list
        sequence = {}
        #the first line is a worthless header.
        del(indata[0])
        #the rest of them are the actual ping sequence, fill them into a dictionary of sequence:pingtime
        for line in indata:
            line = line.split()
            #fifth [4] entry is the first we care about, the sequence number. its generally icmp_seq=<#> lets keep splitting until we get the raw number.
            seq = line[4].split("=")[1]
            #seventh [6] entry is the second thing we care about, its the actual ping time in milliseconds.
            time = line[6].split("=")[1]
            sequence[seq] = time
        sys_ping.last.sequence = sequence
        return sequence
    class last:
        '''This class stores data from last sys_ping.ping()'''
        #blank items for avg_line
        min_time,avg_time,max_time,mdev_time = 0,0,0,0
        #blank items for sum_line
        sent,recieved,pct_loss,op_time = 0,0,0,0
        host = ""
        opts = ""
        success = ""
        sequence = {}
|
5,057 | e3071643548bb3a4e8d0a5710820ad39b8a6b04b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to view and manage OPenn repositories. Use this script to list and
update OPenn primary repositories, to view repository details, and to list
documents in each repository.
"""
import os
import sys
import argparse
import logging
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "openn.settings")
from openn.models import *
from django.conf import settings
from openn.repository.updater import Updater
from openn.openn_exception import OPennException
from openn.openn_functions import *
from openn.repository.configs import Configs
from openn.repository.details import Details
from openn.repository.lister import Lister
# Map some convenient sort by aliases
# Short user-facing names mapped to model field names for --sort-by;
# keys are matched case-insensitively by get_sort_by_field().
SORT_BY_ALIASES = {
    'id': 'repository_id',
    'tag': 'tag',
    'type': 'metadata_type',
    'toc': 'include_file'
}
# Keys printed first, in this order, by print_detail().
DETAIL_KEYS = "tag repository_id name metadata_type documents live include_file".split()
# Keys never printed by print_detail().
SKIP_KEYS = "blurb".split()
# Default layout for print_detail(): ordered keys, skipped keys, and the
# "<key>: <value>" line format.
DETAIL_PARAMS = {
    'keys': DETAIL_KEYS,
    'skip': SKIP_KEYS,
    'format': "%16s: %s"
}
def setup_logger():
    """Attach a DEBUG-level stdout handler to the root logger and bind a
    module-level `logger` for this script."""
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)-15s - %(levelname)s - %(message)s'))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(logging.DEBUG)
    # Expose a named logger globally for the rest of the script.
    global logger
    logger = logging.getLogger(__name__)
def get_configs():
    """Build a Configs object from the Django REPOSITORIES setting."""
    return Configs(settings.REPOSITORIES)
def get_updater():
    """Build an Updater over the validated repository configurations."""
    configs = get_configs()
    configs.validate()
    return Updater(configs)
def get_sort_by_field(arg):
    """Map a user-supplied sort key to its model field name.

    Returns the alias target from SORT_BY_ALIASES when *arg* matches one
    (case-insensitively), otherwise returns *arg* itself unchanged.
    """
    # dict.get with a default can never raise KeyError, so the previous
    # try/except KeyError -> OPennException branch was dead code; unknown
    # keys have always simply passed through.
    return SORT_BY_ALIASES.get(arg.lower(), arg)
def update_repositories(args):
    """CLI handler for `update`: sync all repositories, or just args.tag."""
    updater = get_updater()
    tag = args.tag
    # A missing tag or the special tag 'all' updates every repository.
    # NOTE(review): args.dry_run (defined by the update subparser) is
    # never consulted here — confirm whether --dry-run is implemented.
    if tag is None or tag.lower() == 'all':
        updater.update_all()
    else:
        updater.update(tag)
def update_repository(tag):
    """Update the single repository identified by *tag*."""
    updater = get_updater()
    updater.update(tag)
def validate_configuration(args):
    """CLI handler for `validate`: configs.validate() raises on errors."""
    configs = get_configs()
    configs.validate()
    logging.info("Repository configurations are valid")
def print_detail(detail, params=DETAIL_PARAMS):
    """Print one repository's details: name header, the known keys in
    params['keys'] order, then any remaining keys not in params['skip'].

    Note: *params* defaults to the shared DETAIL_PARAMS dict — callers
    must not mutate it (mutable default argument).
    """
    keys = params['keys']
    skip = params['skip']
    fmt = params['format']
    # Python 2 print statements (this script predates print()).
    print "%s" % (detail['name'],)
    for k in keys:
        print fmt % (k, detail.get(k))
    # print the rest of the keys
    for k in detail.keys():
        if k not in keys and k not in skip:
            print fmt % (k, detail[k])
    print
def print_repo(repo, fmtstr, tag_width):
    """Print one repository row with *fmtstr*; 'NIDB' = not in database."""
    repo.setdefault('repository_id', 'NIDB')
    print fmtstr.format(repo_id=repo['repository_id'], tag=repo['tag'],
                        width=tag_width, doc_count=repo['documents'],
                        name=repo['name'])
def print_list(repos):
    """Print an aligned table of repositories with header and rule lines."""
    # Column widths derived from the longest known tag/name.
    tag_width = len(max(get_repo_tags(), key=len)) + 2
    name_width = len(max(get_repo_names(), key=len))
    fmtstr = "{repo_id:5} {tag:%d} {doc_count:9} {name}" % (tag_width,)
    print fmtstr.format(repo_id="ID", tag="Tag", doc_count="Doc count",
                        name="Repository")
    print fmtstr.format(repo_id="====", tag=("=" * tag_width),
                        doc_count="=========", name=("=" * name_width))
    for repo in repos:
        print_repo(repo, fmtstr, tag_width)
def list_repositories(args):
    """CLI handler for `list`: print the repository table."""
    configs = get_configs()
    configs.validate()
    lister = Lister(configs)
    tag = args.tag  # NOTE(review): read but never used in this handler
    sort_by = get_sort_by_field(args.sort_by)
    print_list(lister.list_all(sort_by))
def repository_details(args):
    """CLI handler for `details`: print details for one or all repos."""
    configs = get_configs()
    configs.validate()
    details = Details(configs)
    tag = args.tag
    sort_by = get_sort_by_field(args.sort_by)
    # No tag, or the special tag 'all', prints every repository.
    if tag is None or tag.lower() == 'all':
        for detail in details.details(sort_by = sort_by):
            print_detail(detail)
    else:
        print_detail(details.get_details(tag))
def main(arguments):
    """Entry point: parse *arguments* and dispatch to the chosen action."""
    setup_logger()
    parser = make_parser()
    args = parser.parse_args(arguments)
    try:
        # Each subparser binds `func` to its handler via set_defaults().
        args.func(args)
    except OPennException as ex:
        # Python 2 `unicode`; report domain errors as a usage error (exits).
        parser.error(unicode(ex))
def make_parser():
    """op-repo option parser"""
    # Top-level parser; the module docstring doubles as the description.
    parser = argparse.ArgumentParser(description=__doc__,
            formatter_class=argparse.RawDescriptionHelpFormatter)
    subparsers = parser.add_subparsers(help="%(prog)s actions")
    #-----------
    # LIST
    #-----------
    list_help = "List primary repositories and their documents."
    list_description= """List primary repositories and the documents in them.
    With no arguments, list primary repositories; --verbose adds printing
    of document counts.
    With the TAG argument, list documents in repository with TAG;
    --verbose option forces listing of all documents, highlighting those
    in repository with TAG. Special tag 'all' lists all documents and
    their repository memberships; --verbose has no effect."""
    list_parser = subparsers.add_parser('list', help=list_help, description=list_description,
            formatter_class=argparse.RawDescriptionHelpFormatter)
    list_parser.add_argument('-v', '--verbose', action='store_true', help='Verbose option.')
    tag_help = """List documents in repository with tag %(metavar)s. With verbose
    option, list all documents, highlighting those in the repository with
    tag %(metavar)s."""
    list_parser.add_argument('tag', metavar='REPO_TAG', nargs='?', help=tag_help)
    list_parser.set_defaults(func=list_repositories)
    sort_by_help = "Sort repositories by %(metavar)s; options: tag, name[the default], repository_id (or id)."
    list_parser.add_argument('-s', '--sort-by', type=str, default='name',
            metavar='FIELD', help=sort_by_help)
    #-----------
    # DETAILS
    #-----------
    details_help = "Give primary repository details."
    details_description= """Give primary repository details.
    With no arguments, give details for all primary repositories.
    With the TAG argument, give details for repository with TAG.
    """
    details_parser = subparsers.add_parser('details', help=details_help, description=details_description,
            formatter_class=argparse.RawDescriptionHelpFormatter)
    tag_help = """Give details for repository with tag %(metavar)s."""
    details_parser.add_argument('tag', metavar='REPO_TAG', nargs='?', help=tag_help)
    sort_by_help = "Sort details by %(metavar)s; options: tag, name[the default], repository_id (or id)."
    details_parser.add_argument('-s', '--sort-by', type=str, default='name',
            metavar='FIELD', help=sort_by_help)
    details_parser.set_defaults(func=repository_details)
    #-----------
    # UPDATE
    #-----------
    update_help = "Update repositories based on application configuration."
    update_description= """Update repositories based on application configuration.
    Use `update` to add new repositories in the settings file to the
    database.
    With the TAG argument, update a single repository.
    """
    update_parser = subparsers.add_parser('update', help=update_help, description=update_description,
            formatter_class=argparse.RawDescriptionHelpFormatter)
    update_parser.add_argument('-n', '--dry-run', action='store_true', help="Dry-run; show changes to be made.")
    # NOTE(review): this reuses tag_help from the DETAILS section above,
    # so `update REPO_TAG --help` shows the details wording — confirm.
    update_parser.add_argument('tag', metavar='REPO_TAG', nargs='?', help=tag_help)
    update_parser.set_defaults(func=update_repositories)
    #-----------
    # VALIDATE
    #-----------
    validate_help = "Validate the repository configuration."
    validate_description= """Validate the repository configuration.
    Exits silently if the configuration is valid; otherwise, displays
    error message(s). """
    validate_parser = subparsers.add_parser('validate', help=validate_help, description=validate_description,
            formatter_class=argparse.RawDescriptionHelpFormatter)
    validate_parser.set_defaults(func=validate_configuration)
    return parser
# Run the CLI when invoked as a script; main()'s result feeds exit status.
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
|
5,058 | 2fc2fd6631cee5f3737dadaac1a115c045af0986 | # Libraries
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import relationship
# Taskobra
from taskobra.orm.base import ORMBase
from taskobra.orm.relationships import SystemComponent
class System(ORMBase):
    """A monitored system (host) composed of counted hardware components."""
    __tablename__ = "System"
    # Primary key.
    unique_id = Column(Integer, primary_key=True)
    # Human-readable system name.
    name = Column(String)
    # Role assignments linking users to this system.
    user_roles = relationship("UserSystemRole")
    # Association rows carrying a per-component count.
    system_components = relationship("SystemComponent")
    @property
    def components(self):
        # Yields (count, component) once PER UNIT: a component with
        # count == 3 is yielded three times, each paired with 3.
        # NOTE(review): __repr__ therefore repeats "3x<comp>" three
        # times — confirm whether yielding once per component was meant.
        for system_component in self.system_components:
            for _ in range(system_component.count):
                yield system_component.count, system_component.component
    def add_component(self, component):
        """Increment the count for *component*, or attach it with count 1."""
        for system_component in self.system_components:
            if system_component.component is component:
                system_component.count += 1
                return
        # NOTE(review): the new association object is not appended to
        # self.system_components here — presumably SystemComponent's
        # system=self kwarg registers it via the relationship; confirm.
        SystemComponent(system=self, component=component, count=1)
    def __repr__(self):
        # Debug form: one "<count>x<component>" entry per yielded unit.
        components = [
            f"{count}x{repr(component)}"
            for count, component in self.components
        ]
        return f"<System(name={self.name}, {components}, unique_id={self.unique_id})>"
    def __str__(self):
        # Human-readable form: the name, then one indented line per unit.
        linesep = "\n  "
        components = [
            f"{linesep}{repr(component)}"
            for _, component in self.components
        ]
        return f"{self.name}:{''.join(components)}"
|
5,059 | ac8c8dc4bcccef7942dd48d54902e13e811f950c | #!/usr/bin/env python
# coding: utf-8
from unittest import TestCase
from optimoida.logging import (
SUCCESS, FAILURE, logger)
class LoggerTestCase(TestCase):
    """Verify optimoida.logging flag constants and message formatting."""

    def test_flag_value(self):
        # Flags are pre-colored ANSI strings: blue SUCCESS, red FAILURE.
        self.assertEqual("\x1b[34mSUCCESS\x1b[0m", SUCCESS)
        self.assertEqual("\x1b[31mFAILURE\x1b[0m", FAILURE)

    def test_logger(self):
        message = "test"
        # Each (actual, expected) pair checks one logger call's return.
        cases = [
            (logger.info(message), "\x1b[97m[~] \x1b[0mtest"),
            (logger.info(message, SUCCESS),
             "\x1b[97m[~] \x1b[0m\x1b[34mSUCCESS\x1b[0m test"),
            (logger.warn(message), "\x1b[33m[!] \x1b[0mtest"),
            (logger.error(message), "\x1b[31m[-] \x1b[0mtest"),
            (logger.error(message, FAILURE),
             "\x1b[31m[-] \x1b[0m\x1b[31mFAILURE\x1b[0m test"),
        ]
        for actual, expected in cases:
            self.assertEqual(actual, expected)
|
5,060 | 416f4c6bbd2f2b9562ab2d1477df4ebc45070d8d | #!/usr/bin/env python3
import argparse
import boutvecma
import easyvvuq as uq
import chaospy
import os
import numpy as np
import time
from dask.distributed import Client
from dask_jobqueue import SLURMCluster
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # CLI: one flag selecting local Dask vs SLURM batch execution.
    parser = argparse.ArgumentParser(description="EasyVVUQ applied to BOUT++")
    parser.add_argument(
        "--batch",
        "-b",
        help="Run on a batch (SLURM) system",
        action="store_true",
        default=False,
    )
    args = parser.parse_args()
    # EasyVVUQ campaign backed by Dask for parallel run execution.
    campaign = uq.CampaignDask(name="Conduction.")
    print(f"Running in {campaign.campaign_dir}")
    encoder = boutvecma.BOUTEncoder(template_input="models/conduction/data/BOUT.inp")
    decoder = boutvecma.BOUTDecoder(variables=["T"])
    # Parameter space of the 1D conduction model (bounds + defaults).
    params = {
        "conduction:chi": {"type": "float", "min": 0.0, "max": 1e3, "default": 1.0},
        "T:scale": {"type": "float", "min": 0.0, "max": 1e3, "default": 1.0},
        "T:gauss_width": {"type": "float", "min": 0.0, "max": 1e3, "default": 0.2},
        "T:gauss_centre": {
            "type": "float",
            "min": 0.0,
            "max": 2 * np.pi,
            "default": np.pi,
        },
    }
    campaign.add_app("1D_conduction", params=params, encoder=encoder, decoder=decoder)
    # Input distributions actually sampled (sub-ranges of the above).
    vary = {
        "conduction:chi": chaospy.Uniform(0.2, 4.0),
        "T:scale": chaospy.Uniform(0.5, 1.5),
        "T:gauss_width": chaospy.Uniform(0.01, 0.4),
        "T:gauss_centre": chaospy.Uniform(0.0, 2 * np.pi),
    }
    # Polynomial chaos expansion sampling, order 3.
    sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)
    campaign.set_sampler(sampler)
    campaign.draw_samples()
    run_dirs = campaign.populate_runs_dir()
    print(f"Created run directories: {run_dirs}")
    if args.batch:
        # Example of use on Viking
        cluster = SLURMCluster(
            job_extra=[
                "--job-name=VVUQ",
                "--account=PHYS-YPIRSE-2019",
            ],
            cores=1,
            memory="1 GB",
            processes=1,
            walltime="00:10:00",
            interface="ib0",
        )
        cluster.scale(16)
        print(f"Job script:\n{cluster.job_script()}")
        client = Client(cluster)
    else:
        client = Client(processes=True, threads_per_worker=1)
    print(client)
    time_start = time.time()
    # Run the BOUT++ conduction binary in every run directory via Dask.
    campaign.apply_for_each_run_dir(
        uq.actions.ExecuteLocal(
            os.path.abspath("build/models/conduction/conduction -q -q -q -d .")
        ),
        client,
    )
    client.close()
    time_end = time.time()
    print(f"Finished, took {time_end - time_start}")
    # Gather results, run the PCE analysis, and persist campaign state.
    campaign.collate()
    campaign.apply_analysis(uq.analysis.PCEAnalysis(sampler=sampler, qoi_cols=["T"]))
    results = campaign.get_last_analysis()
    state_filename = os.path.join(campaign.campaign_dir, "campaign_state.json")
    campaign.save_state(state_filename)
    # Plot moments and first-order Sobol indices of T vs radius.
    plt.figure()
    results.plot_moments(
        "T", xlabel=r"$\rho$", filename=f"{campaign.campaign_dir}/moments.png"
    )
    plt.figure()
    results.plot_sobols_first(
        "T", xlabel=r"$\rho$", filename=f"{campaign.campaign_dir}/sobols_first.png"
    )
|
5,061 | 1a9cad6e49e5ed2bb7781f9fec930d48ec048b3b | #!/usr/bin/env python
# coding: utf-8
# MIT Licensed
# http://opensource.org/licenses/MIT
# Sysfs GPIO paths for the LED wired to pin 40.
led_dir = "/sys/class/gpio/gpio40/"
led_pin = led_dir + "value"
led_mode = led_dir + "direction"

import sys

# Open in TEXT mode ("w"): the original "wb" made f.write(str) raise
# TypeError on Python 3; text mode behaves identically for this ASCII
# data on both Python 2 and 3.
with open(led_mode, "w") as f:
    f.write("out")

# Drive the pin with the value supplied on the command line ("0"/"1").
with open(led_pin, "w") as f:
    f.write(sys.argv[1])
"""
Contributors!
Danilo J. S. Bellini
Estevão U. P. Vieira
Lucas S. Simões
Thiago M. Sanches
Paulo R. O. Castro
AEEEW!!!! =D
"""
|
5,062 | 70c20b38edb01552a8c7531b3e87a9302ffaf6c5 | # -*- coding: utf-8 -*-
"""Module providing views for asset storage folder"""
from Products.Five.browser import BrowserView
from plone import api
from plone.app.contenttypes.interfaces import IImage
class AssetRepositoryView(BrowserView):
    """Default page for a folderish asset-storage container."""

    def contained_items(self, uid):
        # Resolve the folder by UID and delegate to its folderListing view.
        folder = api.content.get(UID=uid)
        return folder.restrictedTraverse('@@folderListing')()

    def item_index(self, uid):
        # Number of items in the folder identified by *uid*.
        return len(self.contained_items(uid))

    def preview_image(self, uid):
        # First contained item when it provides IImage; otherwise None.
        listing = self.contained_items(uid)
        if not len(listing):
            return None
        candidate = listing[0].getObject()
        if IImage.providedBy(candidate):
            return candidate
        return None
|
5,063 | 28077af0759e062078f7b9d1f7bbbb93c62835cb | version = (2, 5, 8)
# Dotted version string derived from the `version` tuple, e.g. "2.5.8".
version_string = ".".join(map(str, version))
# Release date of this version (YYYY.MM.DD).
release_date = "2015.12.27"
|
5,064 | ec19567b49f686f613308d79e439f6ff9053fa40 | import sys
import os
import logging
import sh
from ..util.path import SmartTempDir, replace_path
# Module-level logger (not referenced elsewhere in this chunk).
logger = logging.getLogger('pyrsss.gps.teqc')
def rinex_info(rinex_fname,
               nav_fname,
               work_path=None):
    """
    Query RINEX file *rinex_fname* and RINEX nav file *nav_fname* for
    useful information and return in a key/value mapping. Store
    intermediate files in *work_path* (a temporary, automatically
    cleaned up area if not specified).

    Raises ValueError when either input file does not exist.
    """
    if not os.path.isfile(rinex_fname):
        raise ValueError('RINEX observation file {} does not exist'.format(rinex_fname))
    if not os.path.isfile(nav_fname):
        raise ValueError('RINEX navigation file {} does not exist'.format(nav_fname))
    # information mapping
    info = {}
    # Line-by-line parser for teqc's qc report; fills *info* as a side
    # effect while sh streams teqc's stdout through it.
    def process_output(line):
        if line.startswith('Receiver type'):
            info['receiver'] = line.split(':')[1].split('(')[0].strip()
        elif line.lstrip().startswith('antenna WGS 84 (xyz)'):
            # make sure units are [m]
            assert line.rstrip().endswith('(m)')
            # NOTE(review): map() is lazy on Python 3 — this code appears
            # to target Python 2 (list result); confirm consumers.
            info['xyz'] = map(float, line.split(':')[1].split('(')[0].split())
        elif line.lstrip().startswith('antenna WGS 84 (geo)'):
            if line.split(':')[1].lstrip()[0] in ['N', 'S']:
                # skip arcmin, arcsec line
                pass
            else:
                lat, _, lon, _ = line.split(':')[1].split(None, 3)
                info['lat'] = float(lat)
                lon = float(lon)
                # normalize longitude into (-180, 180]
                while lon > 180:
                    lon -= 360
                info['lon'] = lon
        elif line.lstrip().startswith('WGS 84 height'):
            assert line.rstrip().endswith('m')
            info['height'] = float(line.split(':')[1].rstrip()[:-1])
        elif line.startswith('|qc - header| position'):
            # make sure units are [m]
            assert line.rstrip()[-1] == 'm'
            info['xyz error'] = float(line.split(':')[1].rstrip()[:-1])
        elif line.startswith('Observation interval'):
            info['interval'] = float(line.split(':')[1].split()[0])
        elif line.startswith('Moving average MP12'):
            info['MP12'] = float(line.split(':')[1].rstrip()[:-1])
        elif line.startswith('Moving average MP21'):
            info['MP21'] = float(line.split(':')[1].rstrip()[:-1])
    # query the RINEX file via teqc quality check --- process in given
    # work area to avoid intermediate file pollution
    with SmartTempDir(work_path) as work_path:
        # Symlink the inputs into the work area so teqc's by-products
        # land there instead of next to the originals.
        intermediate_rinex_fname = replace_path(work_path, rinex_fname)
        os.symlink(os.path.abspath(rinex_fname),
                   intermediate_rinex_fname)
        intermediate_nav_fname = replace_path(work_path, nav_fname)
        os.symlink(os.path.abspath(nav_fname),
                   intermediate_nav_fname)
        # +qc: quality-check mode. NOTE(review): -R/-S/-E/-C/-J
        # presumably exclude non-GPS constellations — confirm against
        # the teqc manual.
        sh.teqc('+qc',
                '+quiet',
                '-R',
                '-S',
                '-E',
                '-C',
                '-J',
                '-nav', intermediate_nav_fname,
                intermediate_rinex_fname,
                _cwd=work_path,
                _out=process_output,
                _err=sys.stderr)
        os.remove(intermediate_rinex_fname)
        os.remove(intermediate_nav_fname)
    return info
def rinex_merge(output_fname, rinex_fnames, _err=sys.stderr):
    """
    Using teqc, merge *rinex_fnames* and store to the file
    *output_fname*. Returns *output_fname*. Redirect error output to
    *_err*.
    """
    # teqc's -pch merge mode; stdout is captured directly to the file.
    sh.teqc('-pch', *rinex_fnames,
            _out=output_fname,
            _err=_err)
    return output_fname
# Ad-hoc smoke test with hard-coded developer-machine paths.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # Silence sh's very chatty INFO-level command logging.
    logging.getLogger('sh').setLevel(logging.WARNING)
    rinex_fname = '/Users/butala/src/absolute_tec/jplm0010.14o'
    nav_fname = '/Users/butala/src/absolute_tec/jplm0010.14n'
    info = rinex_info(rinex_fname,
                      nav_fname)
    for key in sorted(info):
        print('{:10s}: {}'.format(key, info[key]))
|
5,065 | ecd5097d9d497b62b89217ee3c46506f21fc15d2 | from web3 import Web3, HTTPProvider, IPCProvider
from tcmb.tcmb_parser import TCMB_Processor
from ecb.ecb_parser import ECB_Processor
from web3.contract import ConciseContract
from web3.middleware import geth_poa_middleware
import json
import time
# Currency codes pushed on-chain from the Turkish central bank (TCMB) feed.
tcmb_currencies = ["TRY", "USD", "AUD", "DKK", "EUR", "GBP", "CHF", "SEK", "CAD",
                   "KWD", "NOK", "SAR", "JPY", "BGN", "RON", "RUB", "IRR", "CNY", "PKR"]
# Currency codes pushed on-chain from the European Central Bank (ECB) feed.
ecb_currencies = ["EUR", "USD", "JPY", "BGN", "CZK", "DKK", "GBP", "HUF", "PLN",
                  "RON", "SEK", "CHF", "ISK", "NOK", "HRK", "RUB", "TRY", "AUD", "BRL",
                  "CAD", "CNY", "HKD", "IDR", "ILS", "INR", "KRW", "MXN", "MYR", "NZD",
                  "PHP", "SGD", "THB", "ZAR"]
def epoch_day(epoch_time):
    """Truncate an epoch timestamp to 00:00 of its day (seconds)."""
    seconds = int(epoch_time)
    # 86400 seconds per day; drop the remainder within the current day.
    return seconds - seconds % 86400
# Load deployment configuration: account, contract, gas, and log paths.
with open('config_ebloc.json') as json_data_file:
    config_data = json.load(json_data_file)
owner_address = config_data["owner"]["address"]
owner_password = config_data["owner"]["password"]
contract_address = config_data["contract"]["address"]
contract_abi = config_data["contract"]["abi"]
gas = int(config_data["price"]["gas"])
gas_price = Web3.toWei( int(config_data["price"]["gas_price"]), 'gwei')
ecb_daily_log_path = config_data["log"]["ecb_daily"]
tcmb_daily_log_path = config_data["log"]["tcmb_daily"]
geth_ipc_path = config_data["geth"]["geth_ipc_path"]
contract_address = Web3.toChecksumAddress(contract_address)
# Connect to geth over IPC; PoA chains need the extraData middleware.
web3 = Web3(IPCProvider(geth_ipc_path))
web3.middleware_stack.inject(geth_poa_middleware, layer=0)
web3.eth.defaultAccount = web3.eth.accounts[0]
# Unlock the signing account used for all transactions below.
web3.personal.unlockAccount(web3.eth.accounts[0], owner_password)
contract_instance = web3.eth.contract(abi=contract_abi, address=contract_address, ContractFactoryClass=ConciseContract)
# NOTE(review): this module-level value is recomputed inside add_ecb /
# add_tcmb and appears unused — confirm before relying on it.
unix_time = Web3.toInt(epoch_day(time.time()))
def add_ecb():
    """Fetch today's ECB reference rates and push them on-chain.

    Sends one transaction per currency in ecb_currencies and appends an
    audit line per transaction to the daily log; writes a "Weekend"
    marker instead when no fresh quote is published for today.
    """
    unix_time = Web3.toInt(epoch_day(time.time()))
    ECB = ECB_Processor()
    # `with` guarantees the log file is closed even if a transaction or
    # parse step raises (the old open()/close() pair leaked on error).
    with open(ecb_daily_log_path, "a") as f:
        if(time.strftime("%Y-%m-%d") == ECB.Currency_Dict["time"]):
            for curr in ecb_currencies:
                curr_code = bytes(curr, encoding='utf-8')
                # Rates are scaled by 1e9 and stored as integers on-chain.
                curr_value = web3.toInt(int(float(ECB.Currency_Dict[curr])*(10**9)))
                tx_hash = contract_instance.add_ecb(unix_time, curr_code, curr_value, transact={'from': web3.eth.accounts[0]})
                tx_hash = tx_hash.hex()
                print(time.strftime("%Y-%m-%d %H:%M"), unix_time, tx_hash, curr_code, file=f)
        else:
            # No rates are published on weekends/holidays.
            print(time.strftime("%Y-%m-%d %H:%M"), unix_time, "Weekend", file=f)
def add_tcmb():
    """Fetch today's TCMB forex buying/selling rates and push on-chain.

    Sends two transactions (forex buying and selling) per currency in
    tcmb_currencies and appends one audit line per transaction to the
    daily log; writes a "Weekend" marker when no fresh quote exists.
    """
    unix_time = Web3.toInt(epoch_day(time.time()))
    TCMB = TCMB_Processor()
    # `with` guarantees the log file is closed even if a transaction or
    # parse step raises (the old open()/close() pair leaked on error).
    with open(tcmb_daily_log_path, "a") as f:
        if(time.strftime("%m/%d/%Y") == TCMB.CURRENCY_DICT["Date"]):
            for curr in tcmb_currencies:
                curr_code = bytes(curr, encoding='utf-8')
                # Rates are scaled by 1e9 and stored as integers on-chain.
                curr_value_fb = web3.toInt(int(float(TCMB.CURRENCY_DICT[curr]["ForexBuying"])*(10**9)))
                curr_value_fs = web3.toInt(int(float(TCMB.CURRENCY_DICT[curr]["ForexSelling"])*(10**9)))
                # forex buying
                tx_hash_fb = contract_instance.add_tcmb_forexbuying(unix_time, curr_code, curr_value_fb, transact={'from': web3.eth.accounts[0]})
                tx_hash_fb = tx_hash_fb.hex()
                print(time.strftime("%Y-%m-%d %H:%M"), unix_time, tx_hash_fb, curr_code, file=f)
                # forex selling
                tx_hash_fs = contract_instance.add_tcmb_forexselling(unix_time, curr_code, curr_value_fs, transact={'from': web3.eth.accounts[0]})
                tx_hash_fs = tx_hash_fs.hex()
                print(time.strftime("%Y-%m-%d %H:%M"), unix_time, tx_hash_fs, curr_code, file=f)
        else:
            # No rates are published on weekends/holidays.
            print(time.strftime("%Y-%m-%d %H:%M"), unix_time, "Weekend", file=f)
# Push both feeds when run as a script (e.g. from a daily cron job).
if __name__ == "__main__":
    add_ecb()
    add_tcmb()
    print(time.strftime("%Y-%m-%d %H:%M"), " DONE EBLOC add_ecb & add_tcmb")
5,066 | d9cdcf64042c3c6c4b45ec0e3334ba756dd43fcd | # -*- coding: utf-8 -*-
"""
Created on Mon May 2 17:24:00 2016
@author: pasca
"""
# -*- coding: utf-8 -*-
import os.path as op
from nipype.utils.filemanip import split_filename as split_f
from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec
from nipype.interfaces.base import traits, File, TraitedSpec
from neuropype_ephy.compute_inv_problem import compute_ROIs_inv_sol
from neuropype_ephy.preproc import create_reject_dict
from mne import find_events, compute_covariance, pick_types, write_cov, Epochs
from mne.io import Raw
class InverseSolutionConnInputSpec(BaseInterfaceInputSpec):
    """Input traits for InverseSolution (ROI-level inverse problem)."""
    sbj_id = traits.String(desc='subject id', mandatory=True)
    sbj_dir = traits.Directory(exists=True, desc='Freesurfer main directory',
                               mandatory=True)
    raw_filename = traits.File(exists=True, desc='raw filename', mandatory=True)
    cov_filename = traits.File(exists=True, desc='Noise Covariance matrix',
                               mandatory=True)
    fwd_filename = traits.File(exists=True, desc='LF matrix', mandatory=True)
    is_epoched = traits.Bool(desc='if true raw data will be epoched',
                             mandatory=False)
    events_id = traits.Dict(None, desc='the id of all events to consider.', mandatory=False)
    event_id = traits.Int(None, desc='the id of the event to consider.', mandatory=False)
    t_min = traits.Float(None, desc='start time before event', mandatory=False)
    t_max = traits.Float(None, desc='end time after event', mandatory=False)
    is_evoked = traits.Bool(desc='if true if we want to analyze evoked data',
                            mandatory=False)
    inv_method = traits.String(desc='possible inverse methods are \
    sLORETA, MNE, dSPM', mandatory=True)
    snr = traits.Float(1.0, usedefault=True, desc='use smaller SNR for \
    raw data', mandatory=False)
    parc = traits.String('aparc', usedefault=True,
                         desc='the parcellation to use: aparc vs aparc.a2009s',
                         mandatory=False)
    aseg = traits.Bool(desc='if true sub structures will be considered',
                       mandatory=False)
    aseg_labels = traits.List(desc='list of substructures in the src space',
                              mandatory=False)
    is_blind = traits.Bool(desc='if in the source space there are ROI removed',
                           mandatory=False)
    labels_removed = traits.List(desc='list of label we consider in the blind case',
                                 mandatory=False)
class InverseSolutionConnOutputSpec(TraitedSpec):
    """Output traits for InverseSolution (files produced by the run)."""
    ts_file = File(exists=False, desc='source reconstruction in .npy format')
    labels = File(exists=False, desc='labels file in pickle format')
    label_names = File(exists=False, desc='labels name file in txt format')
    label_coords = File(exists=False, desc='labels coords file in txt format')
class InverseSolution(BaseInterface):
    """
    Compute the inverse solution on raw data considering N_r regions in source
    space based on a FreeSurfer cortical parcellation
    """
    input_spec = InverseSolutionConnInputSpec
    output_spec = InverseSolutionConnOutputSpec
    def _run_interface(self, runtime):
        # Unpack every input trait into locals, then delegate all work to
        # compute_ROIs_inv_sol(); results are cached on self so
        # _list_outputs() can report them.
        sbj_id = self.inputs.sbj_id
        sbj_dir = self.inputs.sbj_dir
        raw_filename = self.inputs.raw_filename
        cov_filename = self.inputs.cov_filename
        fwd_filename = self.inputs.fwd_filename
        is_epoched = self.inputs.is_epoched
        event_id = self.inputs.event_id
        t_min = self.inputs.t_min
        t_max = self.inputs.t_max
        is_evoked = self.inputs.is_evoked
        events_id = self.inputs.events_id
        inv_method = self.inputs.inv_method
        snr = self.inputs.snr
        parc = self.inputs.parc
        aseg = self.inputs.aseg
        aseg_labels = self.inputs.aseg_labels
        is_blind = self.inputs.is_blind
        labels_removed = self.inputs.labels_removed
        self.ts_file, self.labels , self.label_names, self.label_coords= compute_ROIs_inv_sol(raw_filename, sbj_id, sbj_dir,
                                                                                             fwd_filename,
                                                                                             cov_filename,
                                                                                             is_epoched,
                                                                                             event_id, t_min, t_max,
                                                                                             is_evoked,
                                                                                             events_id,
                                                                                             snr, inv_method, parc,
                                                                                             aseg, aseg_labels,
                                                                                             is_blind, labels_removed)
        return runtime
    def _list_outputs(self):
        # Report the file paths computed in _run_interface().
        outputs = self._outputs().get()
        outputs['ts_file'] = self.ts_file
        outputs['labels'] = self.labels
        outputs['label_names'] = self.label_names
        outputs['label_coords'] = self.label_coords
        return outputs
class NoiseCovarianceConnInputSpec(BaseInterfaceInputSpec):
    """Input traits for NoiseCovariance (reuse or compute a cov matrix)."""
    cov_fname_in = traits.File(exists=False, desc='file name for Noise Covariance Matrix')
    raw_filename = traits.File(exists=True, desc='raw data filename')
    is_epoched = traits.Bool(desc='if true if we want to epoch the data',
                             mandatory=False)
    is_evoked = traits.Bool(desc='if true if we want to analyze evoked data',
                            mandatory=False)
    events_id = traits.Dict(None, desc='the id of all events to consider.', mandatory=False)
    t_min = traits.Float(None, desc='start time before event', mandatory=False)
    t_max = traits.Float(None, desc='end time after event', mandatory=False)
class NoiseCovarianceConnOutputSpec(TraitedSpec):
    """Output traits for NoiseCovariance."""
    cov_fname_out = File(exists=False, desc='LF matrix')
class NoiseCovariance(BaseInterface):
    """
    Compute the inverse solution on raw data considering N_r regions in source
    space based on a FreeSurfer cortical parcellation
    """
    # NOTE(review): docstring appears copied from InverseSolution — this
    # interface actually reuses or computes a noise covariance matrix.
    input_spec = NoiseCovarianceConnInputSpec
    output_spec = NoiseCovarianceConnOutputSpec
    def _run_interface(self, runtime):
        raw_filename = self.inputs.raw_filename
        cov_fname_in = self.inputs.cov_fname_in
        is_epoched = self.inputs.is_epoched
        is_evoked = self.inputs.is_evoked
        events_id = self.inputs.events_id
        t_min = self.inputs.t_min
        t_max = self.inputs.t_max
        # Reuse an existing covariance file when given; otherwise derive
        # one from epoched, evoked data next to the raw file.
        if cov_fname_in == '' or not op.exists(cov_fname_in):
            if is_epoched and is_evoked:
                raw = Raw(raw_filename)
                events = find_events(raw)
                data_path, basename, ext = split_f(raw.info['filename'])
                self.cov_fname_out = op.join(data_path, '%s-cov.fif' % basename)
                if not op.exists(self.cov_fname_out):
                    # Python 2 print statement (this module targets py2).
                    print '\n*** COMPUTE COV FROM EPOCHS ***\n' + self.cov_fname_out
                    reject = create_reject_dict(raw.info)
                    picks = pick_types(raw.info, meg=True, ref_meg=False,
                                       exclude='bads')
                    epochs = Epochs(raw, events, events_id, t_min, t_max,
                                    picks=picks, baseline=(None, 0),
                                    reject=reject)
                    # TODO method='auto'? too long!!!
                    noise_cov = compute_covariance(epochs, tmax=0,
                                                   method='diagonal_fixed')
                    write_cov(self.cov_fname_out, noise_cov)
                else:
                    print '\n *** NOISE cov file %s exists!!! \n' % self.cov_fname_out
            else:
                # NOTE(review): bare string below is a no-op (a py2 print
                # was probably intended), and self.cov_fname_out is never
                # set on this path, so _list_outputs() would fail.
                '\n *** NO EPOCH DATA \n'
        else:
            print '\n *** NOISE cov file %s exists!!! \n' % cov_fname_in
            self.cov_fname_out = cov_fname_in
        return runtime
    def _list_outputs(self):
        # Report the covariance file path chosen in _run_interface().
        outputs = self._outputs().get()
        outputs['cov_fname_out'] = self.cov_fname_out
        return outputs
|
5,067 | 4a8d203872a1e86c54142dea6cd04c1cac6bcfb2 |
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
from sklearn.svm import SVR
# In[2]:
from sklearn.preprocessing import StandardScaler
# In[3]:
#import matplotlib.pyplot as plt
# %matplotlib inline
# In[90]:
# NOTE: exported Jupyter notebook — bare expressions below (e.g.
# `X_test.shape`) display values in a notebook but are no-ops as a script.
aapl = pd.read_csv('return_fcast.csv')
# In[79]:
# Target: daily return; features: eight technical indicators.
y = aapl['return']
# In[80]:
X = aapl[['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI']]
# In[14]:
# NOTE(review): DataFrame.reshape was deprecated/removed in modern
# pandas, and the 2475-row count is hard-coded — confirm data size.
X = X.reshape((2475,8))
# In[21]:
y = np.array(y).reshape((2475,1))
# In[22]:
# Standardize features and target before fitting the RBF SVR.
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y)
# In[25]:
regressor = SVR(kernel='rbf')
regressor.fit(X,y)
# In[27]:
testing_df = pd.read_csv('testing.csv')
# In[28]:
X_test = testing_df[['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI']]
# In[29]:
X_test.shape
# In[33]:
# NOTE(review): re-fitting the scaler on test data uses test-set
# statistics; sc_X.transform(X_test) is the usual intent — confirm.
X_test = sc_X.fit_transform(X_test)
# In[34]:
# In[35]:
# (Removed a bare `y_pred` expression that stood here: it executed
# before y_pred was assigned below, so running this file top-to-bottom
# raised NameError. It was harmless notebook display residue.)
# In[36]:
# Predict on the scaled test features, then undo the target scaling.
y_pred = regressor.predict(X_test)
y_pred = sc_y.inverse_transform(y_pred)
# In[37]:
y_pred
# In[38]:
for i in range(len(y_pred)):
    print(y_pred[i])
# In[3]:
# run SVR for the AXP-...DD stocks
# NOTE(review): `axp` is created here but never used afterwards.
axp = pd.DataFrame(columns=aapl.columns)
# In[3]:
#stocks = ['AAPL','AXP','BA','CAT','CSCO','CVX','DIS','DD','GS']
stocks = ['MCD']
# In[4]:
# read indicators 09-18
ADXR = pd.read_csv('data/djADXR.csv')
ATR = pd.read_csv('data/djATR.csv')
SMA = pd.read_csv('data/sma.csv')
Hurst = pd.read_csv('data/hurst.csv')
EMA = pd.read_csv('data/ema.csv')
MACD = pd.read_csv('data/macd.csv')
VIX = pd.read_csv('data/vix.csv')
RSI = pd.read_csv('data/rsi.csv')
# In[5]:
VIX.iloc[40:2476]
# In[121]:
# read stock prices 09-18
# In[3]:
dj_df = pd.read_csv('data/djindex.csv')
# In[7]:
# Daily log returns per ticker, indexed by date.
dj_df = pd.read_csv('data/djindex.csv')
#dj_df = dj_df[['Date','AAPL','AXP','BA','CAT','CSCO','CVX','DIS','DD','GS']].iloc[39:2516]
#dj_df = dj_df[['Date','MCD']].iloc[39:2516]
return_df = pd.DataFrame(columns=dj_df.columns[1:],index=dj_df['Date'])
for i in dj_df.columns[1:]:
    return_df[i] = list(np.log(dj_df[i]/dj_df[i].shift(1)))
# In[9]:
return_df = return_df.dropna()
# In[10]:
# Full-period return covariance, persisted for later use.
cov = return_df.cov()
cov.to_csv('cov0918.csv')
# In[41]:
list(dj_df.columns[1:])
# In[39]:
# NOTE(review): this assigns the whole DataFrame as its own index —
# almost certainly `dj_df['Date']` was intended (as done below).
dj_df.index = dj_df
# In[65]:
# Last 252 sessions (~one trading year) of prices.
stock18 = dj_df[dj_df.columns[1:]].iloc[-252:]
# In[67]:
stock18
# In[44]:
dj_df
# In[68]:
# Recompute returns/covariance over the final year only.
dj_df = pd.read_csv('data/djindex.csv')
dj_df.index = dj_df['Date']
return_df = pd.DataFrame(columns=list(dj_df.columns[1:]))
for i in dj_df.columns[1:]:
    return_df[i] = list(np.log(stock18[i]/stock18[i].shift(1)))
# In[69]:
return_df = return_df.dropna()
# In[70]:
return_df
# In[71]:
cov = return_df.cov()
# In[72]:
cov
# In[73]:
cov.to_csv('cov.csv')
# In[10]:
# store return prediction
result = pd.DataFrame(columns=stocks)
# In[21]:
# indicators forecast
ADXR_f = pd.read_csv('data/tesingadxr740.csv')
ATR_f = pd.read_csv('data/tesingatr740.csv')
SMA_f = pd.read_csv('data/sma_forecast.csv')
Hurst_f = pd.read_csv('data/hurst_forecast.csv')
EMA_f = pd.read_csv('data/ema_fcast.csv')
MACD_f = pd.read_csv('data/macd_fcast.csv')
VIX_f = pd.read_csv('data/vix_fcast.csv')
RSI_f = pd.read_csv('data/rsi_fcast.csv')
# In[22]:
# Initialized scaler in order to transform variables into (-1,1)
sc_X = StandardScaler()
sc_y = StandardScaler()
regressor = SVR(kernel='rbf')
temp = pd.DataFrame(columns=['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI'])
temp['VIX'] = list(VIX['VIX'].iloc[40:2516]) # all stocks share the same vix
temp_f = pd.DataFrame(columns=['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI'])
temp_f['VIX'] = list(VIX_f['VIX Forecast'].iloc[0:250])
for i in ['MCD']: # iterate each stock
    # First, extract training data set, including indicators(X) and return(y)
    temp['ADXR'] = list(ADXR[i].iloc[40:2516])
    temp['ATR'] = list(ATR[i].iloc[40:2516])
    temp['SMA'] = list(SMA['SMA_'+i].iloc[40:2516])
    temp['Hurst'] = list(Hurst['Hurst'+i].iloc[40:2516])
    temp['EMA'] = list(EMA[i].iloc[40:2516])
    temp['MACD'] = list(MACD[i].iloc[40:2516])
    temp['RSI'] = list(RSI[i].iloc[40:2516])
    # transformation
    X = sc_X.fit_transform(temp[['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI']])
    #print(X.shape)
    # NOTE(review): 2476 rows hard-coded here vs 2475 earlier — verify.
    y = sc_y.fit_transform(np.array(return_df[i].dropna()).reshape(2476,1))
    #print(y.shape)
    # training
    regressor.fit(X,y)
    # predicting
    temp_f['ADXR'] = list(ADXR_f[i+'.1976.10.11.20.00.00'].iloc[0:250])
    temp_f['ATR'] = list(ATR_f[i+'.1976.11.07.19.00.00'].iloc[0:250])
    temp_f['SMA'] = list(SMA_f[i].iloc[0:250])
    temp_f['Hurst'] = list(Hurst_f[i].iloc[0:250])
    temp_f['EMA'] = list(EMA_f[i].iloc[0:250])
    temp_f['MACD'] = list(MACD_f[i].iloc[0:250])
    temp_f['RSI'] = list(RSI_f[i].iloc[0:250])
    X_test = temp_f[['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI']]
    X_test = sc_X.fit_transform(X_test)
    y_pred = regressor.predict(X_test)
    y_pred = sc_y.inverse_transform(y_pred)
    # write predicted returns into result
    result[i] = y_pred
    print(i)
# In[23]:
result.to_csv('mac_fcast.csv')
# In[161]:
ADXR['AAPL'].iloc[40:2516]
# In[ ]:
|
def inplace_quick_sort(S, start, end):
    """Sort the inclusive slice S[start..end] in place with quicksort.

    The last element of the range is the pivot; two converging scan
    pointers partition the range, then both sides are sorted
    recursively. Returns None (S is modified in place).
    """
    if end <= start - 1:  # empty range: nothing to do
        return
    pivot = S[end]
    lo, hi = start, end - 1
    while lo <= hi:
        # Advance lo past elements already on the correct (left) side.
        while lo <= hi and S[lo] < pivot:
            lo += 1
        # Retreat hi past elements already on the correct (right) side.
        while lo <= hi and pivot < S[hi]:
            hi -= 1
        if lo <= hi:
            S[lo], S[hi] = S[hi], S[lo]
            lo += 1
            hi -= 1
    # Move the pivot into its final slot and recurse on both sides.
    S[lo], S[end] = S[end], S[lo]
    inplace_quick_sort(S, start, lo - 1)
    inplace_quick_sort(S, lo + 1, end)
# Demo: sort an 8-element list over its full inclusive range [0, 7].
S = [4, 2, 6, 8, 3, 9, 5, 11]
inplace_quick_sort(S, 0, 7)
print(S)
5,069 | 046db03b146ce0182ba7889908f536a09de051d5 | from HDPython import *
import HDPython.examples as ahe
from enum import Enum, auto
# Counter lifecycle states (idle -> running -> done). The functional
# Enum API assigns values 1, 2, 3 — identical to the auto() members.
counter_state = Enum("counter_state", ["idle", "running", "done"])
class Counter_cl(v_class_master):
    """HDL master class: counts clock ticks toward a programmable maximum."""
    def __init__(self):
        super().__init__()
        # Current tick count (32-bit).
        self.counter = v_variable(v_slv(32))
        # Target value to count to.
        self.counter_max = v_variable(v_slv(32))
        # idle -> running -> done state machine.
        self.state = v_variable(v_enum(counter_state.idle))
    def _onPull(self):
        # Advance the count each cycle while running.
        # NOTE(review): nothing here compares counter to counter_max or
        # transitions running -> done — confirm the comparison wasn't
        # omitted (isDone() can otherwise never become true).
        if self.state == counter_state.running:
            self.counter << self.counter + 1
    def count_to_max(self, maxValue):
        """Arm the counter with *maxValue* (only honored when idle)."""
        if self.state == counter_state.idle:
            self.counter << 0
            self.counter_max << maxValue
            self.state << counter_state.running
    def isDone(self):
        """True once the count has completed."""
        return self.state == counter_state.done
    def reset(self):
        """Return to idle (only honored from the done state)."""
        if self.state == counter_state.done:
            self.state << counter_state.idle
class my_first_test_bench(v_entity):
def __init__(self):
super().__init__()
self.architecture()
@architecture
def architecture(self):
counter = v_variable(v_slv(32))
max_cnt = v_variable(v_slv(32,300))
clkgen = v_create(ahe.clk_generator())
cnt = Counter_cl()
@rising_edge(clkgen.clk)
def proc():
counter << counter + 1
cnt.count_to_max(max_cnt)
if cnt.isDone():
cnt.reset()
end_architecture()
my_first_instance = v_create(my_first_test_bench())
convert_to_hdl(my_first_instance, "myFirst") |
# 5,070 | 628fdf848079d0ecf5bf4f5bd46e07ad6cd10358 |
from threading import Thread
import time


def sleeping():
    # Simulated long-running background task.
    time.sleep(5)
    print('Ended')


# daemon=True means this thread will not keep the process alive by itself;
# the final sleep (5.5s > 5s) gives it time to finish, so 'Ended' is
# printed before the main thread exits.
Thread(target=sleeping, daemon=True).start()
print('Hello world')
time.sleep(5.5)
5,071 | 454f885e2254295ce6508e70c0348f5cbe855520 | from handler.auth import provider_required
from handler.provider import ProviderBaseHandler
from forms.provider import ProviderAddressForm, ProviderVanityURLForm
import logging
from data import db
from util import saved_message
class ProviderEditAddressHandler(ProviderBaseHandler):
    """Displays and saves a provider's address information."""

    @provider_required
    def get(self, vanity_url=None):
        # Show the address edit page pre-filled from the stored provider.
        provider = db.get_provider_from_vanity_url(vanity_url)
        # NOTE(review): this logs the entire provider record; consider
        # trimming if it can contain sensitive fields.
        logging.info("provider dump before edit:" + str(vars(provider)))
        address_form = ProviderAddressForm().get_form(obj=provider)
        vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)
        self.render_address(provider, address_form=address_form, vanity_url_form=vanity_url_form)

    @provider_required
    def post(self, vanity_url=None):
        # Validate and persist the submitted address form.
        form = ProviderAddressForm().get_form(self.request.POST)
        if form.validate():
            # Store Provider
            provider = db.get_provider_from_vanity_url(vanity_url)
            form.populate_obj(provider)
            provider.put()
            vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)
            self.render_address(provider, address_form=form, vanity_url_form=vanity_url_form, success_message=saved_message)
            # log the event
            self.log_event(user=provider.user, msg="Edit Address: Success")
        else:
            # show validation error (re-render with the invalid form so
            # field errors are displayed)
            provider = db.get_provider_from_vanity_url(vanity_url)
            vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)
            self.render_address(provider, address_form=form, vanity_url_form=vanity_url_form)
            # log the event
            self.log_event(user=provider.user, msg="Edit Address: Validation Error")
class ProviderChangeURLHandler(ProviderBaseHandler):
    """Saves a provider's vanity URL and redirects to the new address page."""

    @provider_required
    def post(self, vanity_url=None):
        form = ProviderVanityURLForm().get_form(self.request.POST)
        if form.validate():
            # Store Provider
            provider = db.get_provider_from_vanity_url(vanity_url)
            form.populate_obj(provider)
            provider.put()
            # Redirect to the address page under the (possibly new) URL.
            self.redirect('/provider/address/' + provider.vanity_url)
            # log the event
            self.log_event(user=provider.user, msg="Edit Address: Success")
        else:
            # show validation error
            provider = db.get_provider_from_vanity_url(vanity_url)
            address_form = ProviderAddressForm().get_form(obj=provider)
            self.render_address(provider, address_form=address_form, vanity_url_form=form)
            # log the event
            self.log_event(user=provider.user, msg="Edit Address: Validation Error")
|
# 5,072 | 2dc4a4ae8e02e823073b1a9711dbd864a54bab43 |
class Account:
    '''Represents a bank account (owner name and balance).'''

    def __init__(self, name, account):
        """Create an account for `name` with initial balance `account`.

        Bug fix: the original assigned the undefined name `amount`
        (NameError at construction); the parameter is called `account`.
        """
        self.name = name
        self._balance = account

    def __str__(self):
        # Bug fix: `slef` -> `self` (NameError when printed).
        # Format string is user-facing Korean text:
        # "account holder {name}, balance {balance}".
        return '예금주 {}, 잔고 {}'.format(self.name, self._balance)

    def _info(self):
        # Prints a single tab character; kept as-is (looks like a stub).
        print('\t')
5,073 | abf25cf3d4435754b916fa06e5e887b1e3589a1c | from django import forms
from crawlr.models import Route, Category, UserProfile
from django.contrib.auth.models import User
class CategoryForm(forms.ModelForm):
    """Form for creating a Category; only `name` comes from the user,
    the counters and slug are hidden and filled in server-side."""
    name = forms.CharField(max_length=128,
                           help_text = "Please enter the category name.")
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    slug = forms.CharField(widget=forms.HiddenInput(), required=False)

    class Meta:
        model = Category
        fields = ('name',)
class RouteForm(forms.ModelForm):
    """Form for creating a crawl Route; start/end/waypts arrive as hidden
    inputs populated by the find-directions page."""
    error_messages = {'duplicate_title':'Please enter a unique name for the crawl'}
    title = forms.CharField(max_length=128,
                            help_text = "Please enter the name of the Crawl")
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    #Hidden inputs for the variables retrieved from find directions page
    start = forms.CharField(widget=forms.HiddenInput())
    end = forms.CharField(widget=forms.HiddenInput())
    waypts = forms.CharField(widget=forms.HiddenInput())
    #Location choice, a drop down menu selection
    category = forms.ModelChoiceField(queryset=Category.objects.all())
    slug = forms.CharField(widget=forms.HiddenInput(), required=False)
    created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget=forms.HiddenInput())

    class Meta:
        model = Route
        fields = ('category', 'title', 'slug', 'start', 'end', 'waypts', 'created_by')

    def clean_title(self):
        # Reject titles that already exist: a successful .get() means a
        # duplicate.  NOTE(review): this is a check-then-act race and will
        # raise MultipleObjectsReturned if duplicates already exist; a
        # unique DB constraint on `title` would be the robust fix.
        title = self.cleaned_data["title"]
        try:
            Route.objects.get(title=title)
            raise forms.ValidationError(
                self.error_messages['duplicate_title'], # customized error message
                code='duplicate_title',
            )
        except Route.DoesNotExist:
            return title
class UserForm(forms.ModelForm):
    """Registration form; the password field renders masked.

    NOTE(review): a plain ModelForm save stores the password as typed --
    verify the view calls user.set_password() before saving.
    """
    password = forms.CharField(widget=forms.PasswordInput(attrs={'placeholder' : 'Password'}), label='')
    username = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Username'}), label='')
    email = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Email'}), label='', required=False)

    class Meta:
        model = User
        fields = ('username', 'email', 'password')
class UserProfileForm(forms.ModelForm):
    """Profile form: only the avatar picture is editable."""
    class Meta:
        model = UserProfile
        fields = ('picture',)
|
5,074 | c2c1194ed23adda015b23897888d1a4cc11423d5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Thibaut Lapierre <git@epheo.eu>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from shaddock.drivers.docker.api import DockerApi
from docker import errors as docker_errors
import sys
class Container(object):
    """A Docker container driven by its Shaddock service model.

    The basic Docker operations (create/start/stop/remove/restart, shell,
    logs) are implemented, plus a Shaddock-specific `_get_info`.  Shaddock
    keeps no track of container IDs and relies on no database: containers
    are retrieved by name through the Docker API.
    """
    def __init__(self, svc_cfg, containers_all=None):
        # svc_cfg: dict describing the service (name, image, api_cfg, ...).
        self.cfg = svc_cfg
        self.env = dict(self.cfg)
        # we may want to use func.__code__.co_varnames here to gather all
        # possible arguments of the docker api and compare them with cfg
        # and delete the crapy hack of the next 8 lines.
        # Strip Shaddock-specific keys the Docker API would reject.
        args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',
                          'cluster', 'images_dir', 'path', 'service_name',
                          'host']
        for arg in args_to_delete:
            try:
                del self.env[arg]
            except KeyError:
                pass
        self.env['detach'] = self.cfg.get('detach', True)
        self.docker_client = None
        # Only open our own API connection when no pre-fetched container
        # list was supplied by the caller.
        if containers_all is None:
            docker_api = DockerApi(self.cfg['api_cfg'])
            self.docker_api = docker_api.connect()
            self.docker_client = self.docker_api.containers
        self.info = self._get_info(containers_all)

    def gather_api_methods(self, func):
        # Names of the arguments accepted by a docker-py API function.
        return func.__code__.co_varnames

    def create(self):
        """Create the container and return its id."""
        print('Creating container: {}'.format(self.cfg['name']))
        create = self.docker_client.create(**self.env)
        return create['id']

    def start(self):
        """Run the container; on APIError report it as already running."""
        try:
            print('Starting container: {}'.format(self.cfg['name']))
            start = self.docker_client.run(**self.env)
        except docker_errors.APIError as error:
            print(error)
            print('Container {} is already running'.format(self.cfg['name']))
            return self.cfg['name']
        return start

    def stop(self):
        # No-op when the container is not running (info is empty).
        c = self.info.get('Container')
        if c is not None:
            print('Stopping container: {}'.format(self.cfg['name']))
            return c.stop()

    def remove(self):
        # Stop first: Docker refuses to remove a running container.
        self.stop()
        c = self.info.get('Container')
        if c is not None:
            print('Removing container: {}'.format(self.cfg['name']))
            try:
                c.remove()
            except docker_errors.NotFound:
                print('Container {} does not exist'.format(self.cfg['name']))
        return True

    def restart(self):
        self.docker_client.restart(self.info['Id'])

    def return_shell(self, cmd):
        """Run `cmd` inside the container and stream its output to stdout."""
        if self.cfg['image'] is not None:
            # "Fix" in order to not use the stream generator in Python2
            c = self.info.get('Container')
            if sys.version_info > (3, 0):
                try:
                    ret = c.exec_run(cmd,
                                     stderr=True,
                                     stdout=True,
                                     stream=True,
                                     )
                    for line in ret[1]:
                        print(line.decode('utf-8').rstrip())
                except (KeyboardInterrupt, SystemExit):
                    return True
            else:
                line = c.exec_run(cmd,
                                  stderr=True,
                                  stdout=True,
                                  stream=False)
                print(line[1])

    def return_logs(self):
        """Stream the container's logs to stdout until interrupted."""
        if self.cfg['image'] is not None:
            # "Fix" in order to not use the stream generator in Python2
            c = self.info.get('Container')
            if sys.version_info > (3, 0):
                try:
                    for line in c.logs(stderr=True,
                                       stdout=True,
                                       timestamps=False,
                                       stream=True,
                                       ):
                        print(line.decode('utf-8').rstrip())
                except (KeyboardInterrupt, SystemExit):
                    return True
            else:
                line = c.logs(stderr=True,
                              stdout=True,
                              timestamps=False,
                              stream=False)
                print(line)

    def _get_info(self, containers_all=None):
        """Return {'Container', 'Id', 'Ip', 'State'} or {} if not running."""
        info = {}
        if containers_all is None:
            containers_all = self.docker_client.list(all=True)
        try:
            # First container whose name matches the service name; the
            # IndexError from [0] on no match is handled below.
            container = [c for c in containers_all
                         if (c.name in self.cfg['service_name'])][0]
            api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')
            api = api.connect()
            infos = api.inspect_container(container.id)
            info['Container'] = container
            info['Id'] = container.id
            info['Ip'] = infos['NetworkSettings']['IPAddress']
            info['State'] = container.status
        except IndexError:
            # Container is not running
            info = {}
        return info
|
# 5,075 | fc8976141a19afd099f92cbbdb578e9c620cb745 |
# Bubble sort: repeatedly swap adjacent out-of-order pairs until sorted.
array = [1, 7, 3, 8, 9, 2, 4]

index = 0
while index < len(array):
    # One pass of adjacent swaps bubbles the largest remaining element
    # towards the end of the list.
    count = 0
    while count <= len(array) - 2:
        # (The original had an unreachable `if count == len(array)-1: break`
        # here -- `count` can never reach len(array)-1 inside this loop --
        # so it was removed, and the 3-line swap became a tuple swap.)
        if array[count] > array[count + 1]:
            array[count], array[count + 1] = array[count + 1], array[count]
        count = count + 1
    index = index + 1

print(array)
5,076 | e7295336a168aa2361a9090e79465eab5f564599 | __author__ = 'sushil'
from .utilities import decompose_date
from .DateConverter import _bs_to_ad, _ad_to_bs
def convert_to_ad(bs_date):
    """Convert a Bikram Sambat date string to an AD (Gregorian) one.

    Returns the converted date formatted as "YYYY-MM-DD" with the month
    and day zero-padded to two digits.
    """
    year, month, day = decompose_date(bs_date)
    converted = _bs_to_ad(year, month, day)
    return "{}-{:02}-{:02}".format(*converted)
def convert_to_bs(ad_date):
    """Convert an AD (Gregorian) date string to a Bikram Sambat one.

    Returns the converted date formatted as "YYYY-MM-DD" with the month
    and day zero-padded to two digits.
    """
    year, month, day = decompose_date(ad_date)
    converted = _ad_to_bs(year, month, day)
    return "{}-{:02}-{:02}".format(*converted)
|
# 5,077 | 839b3ebffebce95de25f75edc67a647bd1318268 |
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function

import numpy as np
from mpi4py import MPI

from parutils import pprint

comm = MPI.COMM_WORLD

pprint("-"*78)
pprint(" Running on %d cores" % comm.size)
pprint("-"*78)

comm.Barrier()

# Prepare a vector of N=5 elements to be broadcasted...
N = 5
if comm.rank == 0:
    A = np.arange(N, dtype=np.float64)    # rank 0 has proper data
else:
    A = np.zeros(N, dtype=np.float64)     # other ranks start zero-filled
print("rank {0}: {1}".format(comm.rank, A))

comm.Barrier()

# Broadcast A from rank 0 to everybody (in-place, buffer + MPI datatype)
comm.Bcast( [A, MPI.DOUBLE], root=0)

# Everybody should now have the same...
print("[%02d] %s" % (comm.rank, A))
|
# 5,078 | f1b36e3ce3189c8dca2e41664ac1a6d632d23f79 |
import ssl
import sys
import psycopg2              # connect Python to PostgreSQL
import paho.mqtt.client      # pip install paho-mqtt
import json

# SECURITY NOTE(review): database credentials are hardcoded here; move
# them to environment variables / a config file and rotate the password.
conn = psycopg2.connect(host = 'raja.db.elephantsql.com', user= 'oyoqynnr', password ='myHVlpJkEO21o29GKYSvMCGI3g4y05bh', dbname= 'oyoqynnr')


def on_connect(client, userdata, flags, rc):
    # Once the broker accepts us, subscribe to every topic under unimet/.
    print('Conectado (%s)' % client._client_id)
    client.subscribe(topic='unimet/#', qos = 0)


def ventasTIENDA(client, userdata, message):
    """Handler for 'unimet/ventas': insert one sale row into the DB.

    Expects a JSON payload with DATE, ID_TIENDA, MAC_ADD and MONTO keys.
    """
    a = json.loads(message.payload)
    print(a)
    cur = conn.cursor()
    # Parameterized query -- safe against SQL injection.
    sql = '''INSERT INTO ventas (time_stamp, id_tienda, mac_add, monto) VALUES ( %s, %s, %s, %s);'''
    cur.execute(sql, (a["DATE"],a["ID_TIENDA"],a["MAC_ADD"],a["MONTO"]))
    conn.commit()
    print('VENTA EFECTUADA')
    print('------------------------------')


def main():
    client = paho.mqtt.client.Client()
    client.on_connect = on_connect
    client.message_callback_add('unimet/ventas', ventasTIENDA)
    client.connect("broker.hivemq.com",1883,60)
    client.loop_forever()   # blocks forever processing messages


if __name__ == '__main__':
    main()
    sys.exit(0)
|
5,079 | 1db397df2d030b2f622e701c46c15d653cb79e55 |
from ParseTree import ParseTree
from Node import Node
from NodeInfo import NodeInfo
from TreeAdjustor import TreeAdjustor
from model.SchemaGraph import SchemaGraph
class TreeAdjustorTest:
    """Manual test driver for TreeAdjustor: builds parse trees by hand,
    runs the adjustor and prints the resulting trees/SQL."""

    # SchemaGraph used when translating trees to SQL (None here; the
    # translate call tolerating None is assumed -- TODO confirm).
    schema = None

    def __init__(self):
        return

    def getAdjustedTreesTest(self):
        """Build a small parse tree, run TreeAdjustor.getAdjustedTrees,
        sort the results by score (descending) and print/translate them."""
        T = ParseTree()
        # Pre-fill with placeholder nodes, then overwrite each one below.
        nodes = [Node(index=-1, word="DEFAULT", posTag="DEFAULT") for i in range(0, 8)]
        nodes[0] = Node(index=0, word="ROOT", posTag="--")
        nodes[0].info = NodeInfo(type="ROOT", value="ROOT")
        nodes[1] = Node(index=1, word="return", posTag="--")
        nodes[1].info = NodeInfo(type="SN", value="SELECT")
        nodes[2] = Node(index=2, word="conference", posTag="--")
        nodes[2].info = NodeInfo(type="NN", value="Author")
        nodes[3] = Node(index=3, word="area", posTag="--")
        nodes[3].info = NodeInfo(type="NN", value="Title")
        nodes[4] = Node(index=4, word="papers", posTag="--")
        nodes[4].info = NodeInfo(type="NN", value="Author")
        nodes[5] = Node(index=5, word="citations", posTag="--")
        nodes[5].info = NodeInfo(type="NN", value="Journal")
        nodes[6] = Node(index=6, word="most", posTag="--")
        nodes[6].info = NodeInfo(type="FN", value=">")
        nodes[7] = Node(index=7, word="total", posTag="--")
        nodes[7].info = NodeInfo(type="FN", value="Year")
        # Wire up the tree: ROOT -> return -> conference -> (area, papers);
        # papers -> citations -> (most, total).
        T.root = nodes[0]
        nodes[0].children.append(nodes[1])
        nodes[1].parent = nodes[0]
        nodes[1].children.append(nodes[2])
        nodes[2].parent = nodes[1]
        nodes[2].children.append(nodes[3])
        nodes[3].parent = nodes[2]
        nodes[2].children.append(nodes[4])
        nodes[4].parent = nodes[2]
        nodes[4].children.append(nodes[5])
        nodes[5].parent = nodes[4]
        nodes[5].children.append(nodes[6])
        nodes[6].parent = nodes[5]
        nodes[5].children.append(nodes[7])
        nodes[7].parent = nodes[5]
        print ("===========test for Running getAdjustedTrees() in TreeAdjustor===========")
        print ("The original tree:")
        print (T.toString())
        print ("Number of possible trees for choice:")
        obj = TreeAdjustor()
        result = TreeAdjustor.getAdjustedTrees(T)
        # result = TreeAdjustor.adjust(T)
        print (len(result))
        # result = sorted(result,cmp=TreeAdjustorTest.cmpp)
        # l =sorted(m, cmp =TreeAdjustor.timeStampCompare)
        # Manual selection sort by score, highest first (the commented-out
        # sorted(..., cmp=...) calls above are Python-2 leftovers).
        for i in range(0, len(result)):
            for j in range(i+1, len(result)):
                if(result[i].getScore() <= result[j].getScore()):
                    temp = result[i]
                    result[i] =result[j]
                    result[j] = temp
        print ("The three trees with highest scores look like:")
        # NOTE(review): message says "three" but five trees are printed.
        for i in range(0,5):
            print (result[i])
        for tree in result:
            print (" treeList Result %s:%d" % (tree.getSentence(), tree.getScore()))
            tree.insertImplicitNodes()
            query = tree.translateToSQL(self.schema)
            print ("qUERY: " + query.toString())

    def adjustTest(self):
        """Build a second tree (with a QN 'each' node) and print every
        tree returned by TreeAdjustor.adjust."""
        T = ParseTree()
        nodes = [Node(index=-1, word="DEFAULT", posTag="DEFAULT") for i in range(0, 9)]
        nodes[0] = Node(index=0, word="ROOT",posTag= "--")
        nodes[0].info = NodeInfo(type="ROOT", value="ROOT")
        nodes[1] = Node(index=1, word="return", posTag="--")
        nodes[1].info = NodeInfo(type="SN", value="SELECT")
        nodes[2] = Node(index=2, word="conference", posTag="--")
        nodes[2].info = NodeInfo(type="NN", value="Author")
        nodes[3] = Node(index=3, word="area", posTag="--")
        nodes[3].info =NodeInfo(type="NN", value="Title")
        nodes[4] =Node(index=4, word="each", posTag="--")
        nodes[4].info = NodeInfo(type="QN", value=">")
        nodes[5] = Node(index=5, word="papers", posTag="--")
        nodes[5].info = NodeInfo(type="NN", value="Author")
        nodes[6] = Node(index=6, word="citations", posTag="--")
        nodes[6].info = NodeInfo(type="NN", value="Journal")
        nodes[7] = Node(index=7, word="most", posTag="--")
        nodes[7].info = NodeInfo(type="FN", value=">")
        nodes[8] = Node(index=8, word="total", posTag="--")
        nodes[8].info = NodeInfo(type="FN", value="Year")
        T.root = nodes[0]
        nodes[0].children.append(nodes[1])
        nodes[1].parent = nodes[0]
        nodes[1].children.append(nodes[2])
        nodes[2].parent = nodes[1]
        nodes[2].children.append(nodes[3])
        nodes[3].parent = nodes[2]
        nodes[2].children.append(nodes[5])
        nodes[5].parent = nodes[2]
        nodes[3].children.append(nodes[4])
        nodes[4].parent = nodes[3]
        nodes[5].children.append(nodes[6])
        nodes[6].parent = nodes[5]
        nodes[6].children.append(nodes[7])
        nodes[7].parent = nodes[6]
        nodes[6].children.append(nodes[8])
        nodes[8].parent = nodes[6]
        print ("===========test for Running adjust() in TreeAdjustor===========")
        treeList = TreeAdjustor.adjust(T)
        print ("Output size: %d"%len(treeList))
        print ("Output trees:")
        ctr=0
        for tr in treeList:
            print ("Tree %d %s"%(ctr, tr.getSentence()))
            ctr+=1

    @staticmethod
    def cmpp(a,b):
        # Python-2 style cmp function; unused under Python 3.
        return a.getScore() > b.getScore()
# Run the manual test when the module is executed/imported.
obj = TreeAdjustorTest()
obj.getAdjustedTreesTest()
# obj.adjustTest()
|
# 5,080 | 513aff6cf29bbce55e2382943767a9a21df2e98e |
#-*-coding:utf-8-*-
from Classify import get_train_data
import sys
'''
Fetch the training-set data.
'''
# argv[1] and argv[2] are forwarded verbatim to get_train_data
# (presumably input and output paths -- confirm against Classify).
get_train_data(sys.argv[1], sys.argv[2])
5,081 | dd7896e3beb5e33282b38efe0a4fc650e629b185 | from gym_mag.envs.mag_control_env import MagControlEnv
|
# 5,082 | d86fe165e378e56650e3b76bf3d0f72e2a50a023 |
import requests

# Fetch a WeChat access token for the given appid/secret pair.
# SECURITY NOTE(review): the appid/secret are hardcoded (and now leaked
# in source control); move them to configuration and revoke the secret.
rsp = requests.get('https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s'%('wx27c0e6ef6a7f0716','6e29e232daf462652f66ee8acc11838b'))
print(rsp.text)
5,083 | e8e78610df4461a96f7d9858870de0e3482801fd | #!/usr/bin/env python
import argparse
import requests
import sys
import os
import xml.dom.minidom
__author__ = 'Tighe Schlottog || tschlottog@paloaltonetworks.com'
'''
wf.py is a script to interact with the WildFire API to upload files or pull back reports on specific hashes. You
need to have the argparse and requests installed. Both modules perform their functions perfectly for the work that
is looking to be completed.
For functional assistance, check out the -h or --help options while executing the wf.py script.
Currently the script is configured to use the WildFire public cloud, but you can easily adapt it to use your WF-500.
This script is only for use with file uploads and report pulling.
File uploads are completed and the WildFire reported SHA256 hash will be output.
Report pulls are written in the format of wildfire-report-<SHA256 hash>.<report format>, they can be either PDF or
XML.
'''
# Global Variables (only edit these)
# Public WildFire cloud endpoints; point these at your WF-500 appliance
# instead if you run a private cloud.
wf_upload_url = 'https://wildfire.paloaltonetworks.com/publicapi/submit/file'
wf_report_url = 'https://wildfire.paloaltonetworks.com/publicapi/get/report'
def parse_args():
    '''
    This function is used to parse the CLI arguments that are passed into the function, after parsing the data it will
    return both the parser itself and the parsed arguments. While not needed, the parser is passed back in case of
    future need.
    :return: parser - the argparse parser itself
    :return: args - the parsed CLI arguments
    '''
    parser = argparse.ArgumentParser(description='Script to upload unknown files to WildFire.')
    # -f and -d are mutually exclusive: upload one file or a directory.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-f', '--file', type=str, help='Location of file to upload to WildFire')
    group.add_argument('-d', '--dir', type=str, help='Location of directory of files to upload to WildFire')
    parser.add_argument('-hash', type=str, help='SHA256 hash of file to pull report from WildFire')
    parser.add_argument('-api_key', type=str, help='WildFire API Key')
    parser.add_argument('-format', type=str, default='pdf', help='Report file format (either xml or pdf)')
    parser.add_argument('-hf', '--hashfile', type=str, help='File of hashes to pull reports from WildFire')
    args = parser.parse_args()
    # Extra cross-argument validation; exits the script on failure.
    check_args(parser, args)
    return parser, args
def check_args(parser, wf_args):
    '''
    This function will take in the parser and the parsed args and will perform some basic verification checks. The
    checks themselves are more complicated than rules that I can feed into the argparse module.
    :param parser: argparse parser
    :param wf_args: parsed CLI arguments, came from the parser argparse handler
    :return: Nothing, this is just a basic verification check. The function will exit the entire script if it doesn't
    pass muster.
    '''
    # Requires an api_key plus either an upload source (-f/-d) or a hash.
    # NOTE(review): the sub-expression `(fmt != 'xml' or fmt != 'pdf')` is
    # always True, so the report format is effectively NOT validated here;
    # it was probably meant to be `in ('xml', 'pdf')` -- confirm intent
    # before tightening, since that would change accepted inputs.
    if not (((wf_args.file or wf_args.dir) or ((str(wf_args.format).lower() != 'xml' or str(wf_args.format).lower() != 'pdf')and wf_args.hash)) and wf_args.api_key):
        print "You are missing one of the necessary options, please check your command structure and try again."
        parser.print_help()
        sys.exit()
def wf_error_codes(error_code):
    '''
    This function will take in the HTTP error codes from the requests function in both the upload and download
    functions and parse them out into human readable error messages, then exit the script.
    :param error_code: http error code from the requests module functions (req_handler.status_code)
    :return: Nothing, this will dump human readable errors and exit the script.
    '''
    # Table-driven replacement for the original 40-line if/elif chain;
    # the per-code message texts are unchanged.
    wf_errors = {
        401: 'HTTP Error %s: API Key is invalid, please retry with valid WildFire API key',
        404: 'HTTP Error %s: Cannot find report associated with requested hash',
        405: 'HTTP Error %s: You must use the POST method for this call',
        413: 'HTTP Error %s: Sample file size exceeds maximum WildFire allowed size',
        418: 'HTTP Error %s: Sample file type is unsupported',
        419: 'HTTP Error %s: You have exceeded your maximum number of requests per day',
        420: 'HTTP Error %s: Insufficient arguments for accessing the API',
        421: 'HTTP Error %s: Invalid arguments for accessing the API',
        500: 'HTTP Error %s: WildFire cloud is currently experiencing issues, please try again later',
        513: 'HTTP Error %s: File upload to WildFire has failed, please check file and try again',
    }
    if error_code in wf_errors:
        print(wf_errors[error_code] % error_code)
    else:
        print('An unknown error has occurred, the HTTP status code is %s' % error_code)
    # Every branch exits, exactly as the original did.
    sys.exit()
def upload_wf_control(wf_args):
    '''
    This is a control function to access the upload_wf_file function. For directories, it will look through all the
    files in the directory and upload them. For single files, it will push through the single upload.
    :param wf_args: These are the parsed CLI arguments from the previous parse_args function.
    :return: Nothing, this is a control function which calls another function.
    '''
    if wf_args.dir:
        try:
            # Upload every entry in the directory (non-recursive).
            for file in os.listdir(wf_args.dir):
                upload_wf_file(wf_args, '%s/%s' %(wf_args.dir, file))
        except OSError as err:
            print '%s -> %s' % (err.strerror, wf_args.dir)
    elif wf_args.file:
        upload_wf_file(wf_args, wf_args.file)
    else:
        # check_args() guarantees -f or -d when no hash was supplied.
        print 'Something went wrong, you should never see this error.'
        sys.exit()
def upload_wf_file(wf_args, filename):
'''
This function is used to upload files into the WildFire Cloud
:param wf_args: This is the parsed CLI arguments from the called parse_args function.
:param wf_file: This is the name of the file from either the args.file or from the read directory on args.dir
:return: Nothing, this function only uploads files into the WildFire Cloud.
'''
global wf_upload_url
wf_headers = {'apikey': wf_args.api_key}
try:
wf_file = {'file': open(filename, 'rb')}
except IOError as err:
print 'Unable to open file "%s", %s' % (wf_file, err.strerror)
sys.exit()
try:
wf_req = requests.post(wf_upload_url, data=wf_headers, files=wf_file)
except requests.exceptions.ConnectionError:
print 'An error has occurred contacting %s, please check the URL and try again.' % wf_upload_url
sys.exit()
if wf_req.status_code != requests.codes.ok:
wf_error_codes(wf_req.status_code)
else:
print 'Successfully uploaded %s with SHA256 hash %s' % (filename, xml.dom.minidom.parseString(wf_req.text).getElementsByTagName('sha256')[0].firstChild.nodeValue)
def pull_wf_report(hash, args):
    '''
    This function will pull down reports from the WildFire Cloud. It can be pulled down in either PDF or XML formats,
    the reports will then be written to the file of the appropriate type.
    :param hash: SHA256 hash string identifying the sample to report on.
    :param args: This is the parsed CLI arguments from the called parse_args function. All components needed will be
    pulled from this passed parameter.
    :return: Nothing, this function only pulls down reports from the WildFire Cloud.
    '''
    global wf_report_url
    wf_headers = {"apikey": args.api_key, "hash": hash, "format": str(args.format).lower()}
    wf_filename = 'wildfire-report-%s.%s' % (hash, str(args.format).lower())
    try:
        wf_req = requests.post(wf_report_url, data=wf_headers)
    except requests.exceptions.ConnectionError:
        print 'An error has occurred contacting %s, please check the URL and try again.' % wf_report_url
        sys.exit()
    if wf_req.status_code != requests.codes.ok:
        wf_error_codes(wf_req.status_code)
    else:
        print 'Successfully pulled report wildfire-report-%s.%s' % (hash, str(args.format).lower())
        # Write as bytes: PDF reports are binary (XML is fine as bytes too).
        with open(wf_filename, 'wb') as wf_dataout:
            wf_dataout.write(wf_req.content)
def multi_hash(args):
    '''
    This function will roll through a file one line at a time to pull the associated hashes on that line. It will
    assume that there is a single hash per line and chop off anything after a space.
    :param args: This is the parsed CLI arguments from the called parse_args function. All components needed will be
    pulled from this passed parameter.
    :return: Nothing, this function only loops and calls the pull_wf_report function for pulling reports.
    '''
    with open(args.hashfile, 'r') as hashes:
        for line in hashes:
            # Bug fix: str.split() returns a *list*; the original passed
            # that whole list to pull_wf_report where a hash string is
            # expected.  Use the first whitespace-separated token and
            # skip blank lines.
            fields = line.split()
            if not fields:
                continue
            pull_wf_report(fields[0], args)
def main():
    '''Dispatch: single-hash report, hash-file batch, or file/dir upload.'''
    args_parser, args = parse_args()
    if args.hash:
        pull_wf_report(args.hash, args)
    elif args.hashfile:
        multi_hash(args)
    else:
        upload_wf_control(args)
    pass
if __name__ == '__main__':
    main()
|
5,084 | 7de3c0ab2e7c8ac00d37f1dfb5948027cfa7806c | #########################################################
# Author: Todd A. Reisel
# Date: 2/24/2003
# Class: StaticTemplateList
#########################################################
from BaseClasses.TemplateList import *;
class StaticTemplateList(TemplateList):
    """TemplateList whose template set is fixed rather than computed."""

    def __init__(self, viewMode = None):
        TemplateList.__init__(self, viewMode);

    def getList(self):
        # Each entry is [template-set name, entry-point file].
        return [ ["graphical", "interface.html"], ["ada", "interface.html"] ];

    def getFeatureName(self):
        return "static";
|
5,085 | f178ae70ce54244624c2254d0d6256b83144db33 | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Define a function to compute color histogram features
# Pass the color_space flag as 3-letter all caps string
# like 'HSV' or 'LUV' etc.
# KEEP IN MIND IF YOU DECIDE TO USE THIS FUNCTION LATER
# IN YOUR PROJECT THAT IF YOU READ THE IMAGE WITH
# cv2.imread() INSTEAD YOU START WITH BGR COLOR!
def bin_spatial(img, color_space='RGB', size=(32, 32)):
    """Compute spatially-binned color features.

    Converts `img` (assumed RGB, e.g. loaded with mpimg.imread) to
    `color_space`, resizes it to `size`, and returns the flattened
    (raveled) pixel values as the feature vector.

    Bug fixes vs. the original:
      * the conversion codes are now all from-RGB (COLOR_RGB2*); the
        original mixed COLOR_BGR2* codes although the demo below feeds
        RGB images (and the reference solution uses RGB2*);
      * an unknown `color_space` now falls back to a plain copy instead
        of passing the string 'RGB' to cv2.cvtColor, which raised.
    KEEP IN MIND: if you read the image with cv2.imread() instead, you
    start with BGR -- pass color_space='BGR' to normalize to RGB first.
    """
    colour_dict = {'BGR': cv2.COLOR_BGR2RGB,
                   'HLS': cv2.COLOR_RGB2HLS,
                   'HSV': cv2.COLOR_RGB2HSV,
                   'LUV': cv2.COLOR_RGB2LUV,
                   'YUV': cv2.COLOR_RGB2YUV,
                   'YCrCb': cv2.COLOR_RGB2YCrCb}
    method = colour_dict.get(color_space.upper())
    if method is not None:
        img = cv2.cvtColor(img, method)
    else:
        # 'RGB' or any unrecognized space: leave the pixels untouched.
        img = np.copy(img)
    small_img = cv2.resize(img, size)
    # Return the feature vector
    return small_img.ravel()
if __name__ == "__main__":
    # You can also read cutout2, 3, 4 etc. to see other examples
    image = mpimg.imread('cutout1.jpg')
    feature_vec = bin_spatial(image, color_space='HSV', size=(32, 32))
    # Plot features
    # NOTE(review): no plt.show() call; add one if the window never opens.
    plt.plot(feature_vec)
    plt.title('Spatially Binned Features')
##
## Solution
##
# Define a function to compute color histogram features
# Pass the color_space flag as 3-letter all caps string
# like 'HSV' or 'LUV' etc.
# def bin_spatial(img, color_space='RGB', size=(32, 32)):
# # Convert image to new color space (if specified)
# if color_space != 'RGB':
# if color_space == 'HSV':
# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
# elif color_space == 'LUV':
# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
# elif color_space == 'HLS':
# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
# elif color_space == 'YUV':
# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
# elif color_space == 'YCrCb':
# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
# else: feature_image = np.copy(img)
# # Use cv2.resize().ravel() to create the feature vector
# features = cv2.resize(feature_image, size).ravel()
# # Return the feature vector
# return features |
# 5,086 | 6ce50552571594c7be77ac0bf3b5274f2f39e545 |
class Circle():
    """A circle with a radius and a fill color (defaults to "white")."""

    def __init__(self, radius, color="white"):
        # Store the construction arguments unchanged.
        self.color = color
        self.radius = radius
c1 = Circle(10, "black")
# Prints "radius: 10, color: black" (the labels are Japanese).
print("半径:{}, 色: {}".format(c1.radius, c1.color))
# 5,087 | 4122da21abab462a28c925c1afa5792ec729a75a |
import re

print("Welcome to the Python Calculator")
print("To stop calculator type: quit")

previous = 0   # running result; 0 doubles as the "no result yet" sentinel
run = True     # main-loop flag, cleared when the user types "quit"


def perform_math():
    '''(numbers) -> numbers
    accepts numbers from the user and performs continuous
    mathematical equations on them.
    precondition input must be numbers and mathematical signs
    '''
    global run
    global previous
    equation = ""
    if previous == 0:
        equation = input("Type in an Equation:")
    else:
        # Show the running total as the prompt so the user can chain ops.
        equation = input(str(previous))
    #Is it too much to want to figure out a way to "force" numerical input?
    if equation == "quit":
        run = False
    else:
        # Strip letters and a few separator characters before evaluating.
        # SECURITY NOTE(review): eval() on user input is dangerous even
        # after this regex filter (operators like ** still get through);
        # a real expression parser (e.g. ast.literal_eval-based) is safer.
        equation = re.sub('[a-zA-Z,:()" "]', '' , equation)
        if previous == 0:
            previous = eval(equation)
        else:
            # Append the new operation to the previous result.
            previous = eval(str(previous) + equation)


while run:
    perform_math()
|
5,088 | 305554fc86ddc116677b6d95db7d94d9f2213c41 | from .line_detection_research import score_pixel_v3p2 |
# 5,089 | 3bb25cedc29f9063046329db1c00e7d9e10ce1cc |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# NOTE(review): `multiprocess` is the third-party fork of the stdlib
# `multiprocessing` package -- confirm it is intentional and installed;
# otherwise this import should read `multiprocessing.managers`.
from multiprocess.managers import BaseManager

from linphonebase import LinphoneBase


class MyManager(BaseManager):
    # Empty subclass: exists only so LinphoneBase can be registered on it.
    pass


# Expose LinphoneBase through the manager so it runs in a child process;
# `linphoneBase` is a proxy to the remote instance.
MyManager.register('LinphoneBase', LinphoneBase)
manager = MyManager()
manager.start()
linphoneBase = manager.LinphoneBase()
# 5,090 | 5aaac757b766b0143ca3ea54d8fc4b8936160ec7 |
from django.urls import path

from . import views

# url configuration for view.index function
app_name = 'movies'   # namespace for {% url 'movies:...' %} reversing
urlpatterns = [
    path('', views.index, name='index'),  # represents a root of this app
    path('<int:movie_id>', views.detail, name='detail')   # movie detail page
]
|
# 5,091 | a7de079866d7ac80260b438043cf0403f598cebc |
'''
Write the necessary code to display the area and perimeter of a rectangle that has a width of 2.4 and a height of 6.4.
'''
# Same computation, restyled with named dimensions and an f-string.
width, height = 2.4, 6.4
perimeter = (width*2)+(height*2)
area = width*height
print(f"Perimeter is {perimeter}, Area is {area}")
5,092 | 4ad3390f8f2c92f35acde507be7a7b713af997f2 | from odoo import models, fields, api
class Aceptar_letras_wizard(models.TransientModel):
    """Wizard that marks the selected letras de cambio as accepted ("ACE")."""
    _name = 'aceptar_letras_wizard'
    _description = "Aceptar letras"

    def _get_letras(self):
        # Default the Many2many to the records the wizard was launched on.
        if self.env.context and self.env.context.get('active_ids'):
            return self.env.context.get('active_ids')
        return []

    letra_ids = fields.Many2many('letra_cambio.letra', default=_get_letras, string='Letras')

    @api.multi
    def aceptar_letras(self):
        # Re-reads active_ids from the context (not from letra_ids) and
        # flips each record's state to "ACE".
        active_ids = self.env.context.get('active_ids', []) or []
        records = self.env['letra_cambio.letra'].browse(active_ids)
        self.env['letra_cambio.letra'].cambiar_estado_all(records, "ACE")
|
# 5,093 | 68bcb76a9c736e21cc1f54c6343c72b11e575b5d |
import time

import torch
from torch.utils.data import DataLoader

from nn_model import NNModel


def train(dataset: 'Dataset', epochs: int=10):
    """Train an NNModel classifier on `dataset` for `epochs` epochs.

    Prints per-epoch summed loss and correct-prediction count, then the
    total wall-clock time.  Assumes each dataset item is (x, y) where the
    argmax of y encodes the class index -- TODO confirm against Dataset.
    """
    loader = DataLoader(dataset, batch_size=2, shuffle=True)
    model = NNModel(n_input=2, n_output=3)
    # model.to(device='cpu')
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    criterion = torch.nn.CrossEntropyLoss()
    start_tm = time.time()
    for epoch in range(1, epochs+1):
        train_loss = 0.0   # summed (not averaged) loss over batches
        train_acc = 0      # count of correct predictions this epoch
        for x, y in loader:
            optimizer.zero_grad()
            y_pred = model(x)
            # Collapse the target to class indices for CrossEntropyLoss.
            y = torch.max(torch.squeeze(y, dim=1), dim=1).indices
            loss = criterion(y_pred, y)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            train_acc += (y_pred.argmax(1) == y).sum().item()
        print(f'[epoch {epoch:02d}]\tloss:{train_loss}\taccuracy:{train_acc}')
    finish_tm = time.time()
    print(f'train finished.({finish_tm-start_tm}sec)')
|
5,094 | be867d600f5f267986368f5573006f63004dbf9e | seq = input('write a sequence of numbers: ')
print(seq.split(','))
print(tuple(seq.split(',')))
|
5,095 | c9f4ae94dc901d34a3c0fb4371c8d35a7fe94507 | """Exercise 7.2. Encapsulate this loop in a function called square_root that takes a as a parameter,
chooses a reasonable value of x, and returns an estimate of the square root of a."""
def my_square_root(a,x) :
e = 0.0001
while True :
y=(x+a/x)/2
if abs(y-x) < e :
return y
break
x = y
a = input("Find square root of which number? ",)
x = input("What is your first guess?")
result = round(my_square_root(float(a),float(x)),3)
print("The square root of ",a,"is ",result)
|
5,096 | fed94e0affa1fe6c705577a63fabee839aa9f05c | # Generated by Django 2.0.1 on 2018-05-01 11:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rover', '0002_auto_20180501_1431'),
]
operations = [
migrations.CreateModel(
name='RoverPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('design_review', models.FileField(blank=True, upload_to='documents/rover')),
],
options={
'verbose_name_plural': 'Rover Page',
'verbose_name': 'Rover Page',
},
),
]
|
5,097 | eb99def75404bc3b674bcb633714009149f2d50d | # Named Entity Recognition on Medical Data (BIO Tagging)
# Bio-Word2Vec Embeddings Source and Reference: https://github.com/ncbi-nlp/BioWordVec
import os
import re
import torch
import pickle
from torch import nn
from torch import optim
import torch.nn.functional as F
import numpy as np
import random
from DNC.dnc import DNC_Module # Importing DNC Implementation
class task_NER():
def __init__(self):
self.name = "NER_task_bio"
# Controller Params
self.controller_size = 128
self.controller_layers = 1
# Head Params
self.num_read_heads = 1
self.num_write_heads = 1
# Processor Params
self.num_inputs = 200 # Length of Embeddings
self.num_outputs = 7 # Class size
# Memory Params
self.memory_N = 128
self.memory_M = 128
# Training Params
self.num_batches = -1
self.save_batch = 5 # Saving model after every save_batch number of batches
self.batch_size = 10
self.num_epoch = 4
# Optimizer Params
self.adam_lr = 1e-4
self.adam_betas = (0.9, 0.999)
self.adam_eps = 1e-8
# Handles
self.machine = None
self.loss = None
self.optimizer = None
# Class Dictionaries
self.labelDict = None # Label Dictionary - Labels to Index
self.reverseDict = None # Inverse Label Dictionary - Index to Labels
# File Paths
self.concept_path_train = "../medical_data/train_data/concept" # Path to train concept files
self.text_path_train = "../medical_data/train_data/txt" # Path to train text summaries
self.concept_path_test = "../medical_data/test_data/concept" # Path to test concept files
self.text_path_test = "../medical_data/test_data/txt" # Path to test text summaries
self.save_path = "../medical_data/cleaned_files" # Save path
self.embed_dic_path = "../medical_data/embeddings/bio_embedding_dictionary.dat" # Word2Vec embeddings Dictionary path
self.random_vec = "../medical_data/embeddings/random_vec.dat" # Path to random embedding (Used to create new vectors)
self.model_path = "../saved_models/" # Stores Trained Models
# Miscellaneous
self.padding_symbol = np.full((self.num_inputs), 0.01) # Padding symbol embedding
def get_task_name(self):
return self.name
def init_dnc(self):
self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.controller_size, self.controller_layers, self.num_read_heads, self.num_write_heads, self.memory_N, self.memory_M)
def init_loss(self):
self.loss = nn.CrossEntropyLoss(reduction = 'mean') # Cross Entropy Loss -> Softmax Activation + Cross Entropy Loss
def init_optimizer(self):
self.optimizer = optim.Adam(self.machine.parameters(), lr = self.adam_lr, betas = self.adam_betas, eps = self.adam_eps)
def calc_loss(self, Y_pred, Y):
# Y: dim -> (sequence_len x batch_size)
# Y_pred: dim -> (sequence_len x batch_size x num_outputs)
loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)
for i in range(Y_pred.shape[0]):
loss_vec[i] = self.loss(Y_pred[i], Y[i])
return torch.mean(loss_vec)
def calc_cost(self, Y_pred, Y): # Calculates % Cost
# Y: dim -> (sequence_len x batch_size)
# Y_pred: dim -> (sequence_len x batch_size x sequence_width)
'''
Note:
1). For considering an prediction to be True Positive, prediction must match completely with labels entity (not partially). Else it is False Negative.
2). For considering a prediction to be False Positive, it must be full entity (BIII) and not completely match the label entity.
'''
# Stores correct class labels for each entity type
class_bag = {}
class_bag['problem'] = 0 # Total labels
class_bag['test'] = 0 # Total labels
class_bag['treatment'] = 0 # Total labels
class_bag['problem_cor'] = 0 # Correctly classified labels
class_bag['test_cor'] = 0 # Correctly classified labels
class_bag['treatment_cor'] = 0 # Correctly classified labels
class_bag['problem_fp'] = 0 # False positive classified labels
class_bag['test_fp'] = 0 # False positive classified labels
class_bag['treatment_fp'] = 0 # False positive classified labels
pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()).reshape(-1) # Predicted class. dim -> (sequence_len*batch_size)
Y = np.transpose(Y.numpy()).reshape(-1) # Converting to NumPy Array and linearizing
cor_pred = (Y == pred_class).astype(np.int) # Comparing Prediction and Labels to find correct predictions
class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size)*100.0 # % Accuracy of Correctly Predicted Words (Not Entities)
# Getting the beginning index of all the entities
beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])
# Getting the end index of all the entities (All the Index previous of 'Other'/'Begin' and not equal to 'Other')
target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1
if target[0] == -1:
target = target[1:]
end_idx = list(target[np.where(Y[target] != 6)[0]])
if Y[-1] != 6:
end_idx.append(Y.size-1)
assert len(beg_idx) == len(end_idx) # Sanity Check
class_bag['total'] = len(beg_idx) # Total number of Entities
# Counting Entities
sum_vec = np.cumsum(cor_pred) # Calculates cumulative summation of predicted vector
for b, e in zip(beg_idx, end_idx):
idx_range = e-b+1 # Entity span
sum_range = sum_vec[e]-sum_vec[b]+1 # Count of entity elements which are predicted correctly
lab = self.reverseDict[Y[b]][2:] # Extracting entity type (Problem, Test or Treatment)
class_bag[lab] = class_bag[lab]+1 # Getting count of each entities
if sum_range == idx_range: # +1 if entity is classified correctly
class_bag[lab+'_cor'] = class_bag[lab+'_cor']+1
# Detecting False Positives
# Getting the beginning index of all the entities in Predicted Results
beg_idx_p = list(np.where(np.in1d(pred_class, [0, 2, 4]))[0])
for b in beg_idx_p:
if cor_pred[b] == 0:
lab = self.reverseDict[pred_class[b]][2:]
class_bag[lab+'_fp'] = class_bag[lab+'_fp']+1
return class_bag
def print_word(self, token_class): # Prints the Class name from Class number
word = self.reverseDict[token_class]
print(word + "\n")
def clip_grads(self): # Clipping gradients for stability
"""Gradient clipping to the range [10, 10]."""
parameters = list(filter(lambda p: p.grad is not None, self.machine.parameters()))
for p in parameters:
p.grad.data.clamp_(-10, 10)
def initialize_labels(self): # Initializing label dictionaries for Labels->IDX and IDX->Labels
self.labelDict = {} # Label Dictionary - Labels to Index
self.reverseDict = {} # Inverse Label Dictionary - Index to Labels
# Using BIEOS labelling scheme
self.labelDict['b-problem'] = 0 # Problem - Beginning
self.labelDict['i-problem'] = 1 # Problem - Inside
self.labelDict['b-test'] = 2 # Test - Beginning
self.labelDict['i-test'] = 3 # Test - Inside
self.labelDict['b-treatment'] = 4 # Treatment - Beginning
self.labelDict['i-treatment'] = 5 # Treatment - Inside
self.labelDict['o'] = 6 # Outside Token
# Making Inverse Label Dictionary
for k in self.labelDict.keys():
self.reverseDict[self.labelDict[k]] = k
# Saving the diictionaries into a file
self.save_data([self.labelDict, self.reverseDict], os.path.join(self.save_path, "label_dicts_bio.dat"))
def parse_concepts(self, file_path): # Parses the concept file to extract concepts and labels
conceptList = [] # Stores all the Concept in the File
f = open(file_path) # Opening and reading a concept file
content = f.readlines() # Reading all the lines in the concept file
f.close() # Closing the concept file
for x in content: # Reading each line in the concept file
dic = {}
# Cleaning and extracting the entities, labels and their positions in the corresponding medical summaries
x = re.sub('\n', ' ', x)
x = re.sub(r'\ +', ' ', x)
x = x.strip().split('||')
temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]
temp1[0] = temp1[0][3:]
temp1[-3] = temp1[-3][0:-1]
entity = temp1[0:-2]
if len(entity) >= 1:
lab = ['i']*len(entity)
lab[0] = 'b'
lab = [l+"-"+label for l in lab]
else:
print("Data in File: " + file_path + ", not in expected format..")
exit()
noLab = [self.labelDict[l] for l in lab]
sLine, sCol = int(temp1[-2].split(":")[0]), int(temp1[-2].split(":")[1])
eLine, eCol = int(temp1[-1].split(":")[0]), int(temp1[-1].split(":")[1])
'''
# Printing the information
print("------------------------------------------------------------")
print("Entity: " + str(entity))
print("Entity Label: " + label)
print("Labels - BIO form: " + str(lab))
print("Labels Index: " + str(noLab))
print("Start Line: " + str(sLine) + ", Start Column: " + str(sCol))
print("End Line: " + str(eLine) + ", End Column: " + str(eCol))
print("------------------------------------------------------------")
'''
# Storing the information as a dictionary
dic['entity'] = entity # Entity Name (In the form of list of words)
dic['label'] = label # Common Label
dic['BIO_labels'] = lab # List of BIO labels for each word
dic['label_index'] = noLab # Labels in the index form
dic['start_line'] = sLine # Start line of the concept in the corresponding text summaries
dic['start_word_no'] = sCol # Starting word number of the concept in the corresponding start line
dic['end_line'] = eLine # End line of the concept in the corresponding text summaries
dic['end_word_no'] = eCol # Ending word number of the concept in the corresponding end line
# Appending the concept dictionary to the list
conceptList.append(dic)
return conceptList # Returning the all the concepts in the current file in the form of dictionary list
def parse_summary(self, file_path): # Parses the Text summaries
file_lines = [] # Stores the lins of files in the list form
tags = [] # Stores corresponding labels for each word in the file (Default label: 'o' [Outside])
default_label = len(self.labelDict)-1 # default_label is "7" (Corresponding to 'Other' entity)
# counter = 1 # Temporary variable used during print
f = open(file_path) # Opening and reading a concept file
content = f.readlines() # Reading all the lines in the concept file
f.close()
for x in content:
x = re.sub('\n', ' ', x)
x = re.sub(r'\ +', ' ', x)
file_lines.append(x.strip().split(" ")) # Spliting the lines into word list and Appending each of them in the file list
tags.append([default_label]*len(file_lines[-1])) # Assigining the default_label to all the words in a line
'''
# Printing the information
print("------------------------------------------------------------")
print("File Lines No: " + str(counter))
print(file_lines[-1])
print("\nCorresponding labels:")
print(tags[-1])
print("------------------------------------------------------------")
counter += 1
'''
assert len(tags[-1]) == len(file_lines[-1]), "Line length is not matching labels length..." # Sanity Check
return file_lines, tags
def modify_labels(self, conceptList, tags): # Modifies the default labels of each word in text files with the true labels from the concept files
for e in conceptList: # Iterating over all the dictionary elements in the Concept List
if e['start_line'] == e['end_line']: # Checking whether concept is spanning over a single line or multiple line in the summary
tags[e['start_line']-1][e['start_word_no']:e['end_word_no']+1] = e['label_index'][:]
else:
start = e['start_line']
end = e['end_line']
beg = 0
for i in range(start, end+1): # Distributing labels over multiple lines in the text summaries
if i == start:
tags[i-1][e['start_word_no']:] = e['label_index'][0:len(tags[i-1])-e['start_word_no']]
beg = len(tags[i-1])-e['start_word_no']
elif i == end:
tags[i-1][0:e['end_word_no']+1] = e['label_index'][beg:]
else:
tags[i-1][:] = e['label_index'][beg:beg+len(tags[i-1])]
beg = beg+len(tags[i-1])
return tags
def print_data(self, file, file_lines, tags): # Prints the given data
counter = 1
print("\n************ Printing details of the file: " + file + " ************\n")
for x in file_lines:
print("------------------------------------------------------------")
print("File Lines No: " + str(counter))
print(x)
print("\nCorresponding labels:")
print([self.reverseDict[i] for i in tags[counter-1]])
print("\nCorresponding Label Indices:")
print(tags[counter-1])
print("------------------------------------------------------------")
counter += 1
def save_data(self, obj_list, s_path): # Saves the file into the binary file using Pickle
# Note: The 'obj_list' must be a list and none other than that
pickle.dump(tuple(obj_list), open(s_path,'wb'))
def acquire_data(self, task): # Read all the concept files to get concepts and labels, proces them and save them
data = {} # Dictionary to store all the data objects (conceptList, file_lines, tags) each indexed by file name
if task == 'train': # Determining the task type to assign the data path accordingly
t_path = self.text_path_train
c_path = self.concept_path_train
else:
t_path = self.text_path_test
c_path = self.concept_path_test
for f in os.listdir(t_path):
f1 = f.split('.')[0] + ".con"
if os.path.isfile(os.path.join(c_path, f1)):
conceptList = self.parse_concepts(os.path.join(c_path, f1)) # Parsing concepts and labels from the corresponding concept file
file_lines, tags = self.parse_summary(os.path.join(t_path, f)) # Parses the document summaries to get the written notes
tags = self.modify_labels(conceptList, tags) # Modifies he default labels to each word with the true labels from the concept files
data[f1] = [conceptList, file_lines, tags] # Storing each object in dictionary
# self.print_data(f, file_lines, tags) # Printing the details
return data
def structure_data(self, data_dict): # Structures the data in proper trainable form
final_line_list = [] # Stores words of all the files in separate sub-lists
final_tag_list = [] # Stores tags of all the files in separate sub-lists
for k in data_dict.keys(): # Extracting data from each pre-processed file in dictionary
file_lines = data_dict[k][1] # Extracting story
tags = data_dict[k][2] # Extracting corresponding labels
# Creating empty lists
temp1 = []
temp2 = []
# Merging all the lines in file into a single list. Same for corresponding labels
for i in range(len(file_lines)):
temp1.extend(file_lines[i])
temp2.extend(tags[i])
assert len(temp1) == len(temp2), "Word length not matching Label length for story in " + str(k) # Sanity Check
final_line_list.append(temp1)
final_tag_list.append(temp2)
assert len(final_line_list) == len(final_tag_list), "Number of stories not matching number of labels list" # Sanity Check
return final_line_list, final_tag_list
def padding(self, line_list, tag_list): # Pads stories with padding symbol to make them of same length
diff = 0
max_len = 0
outside_class = len(self.labelDict)-1 # Classifying padding symbol as "outside" term
# Calculating Max Summary Length
for i in range(len(line_list)):
if len(line_list[i])>max_len:
max_len = len(line_list[i])
for i in range(len(line_list)):
diff = max_len - len(line_list[i])
line_list[i].extend([self.padding_symbol]*diff)
tag_list[i].extend([outside_class]*diff)
assert (len(line_list[i]) == max_len) and (len(line_list[i]) == len(tag_list[i])), "Padding unsuccessful" # Sanity check
return np.asarray(line_list), np.asarray(tag_list) # Making NumPy array of size (batch_size x story_length x word size) and (batch_size x story_length x 1) respectively
def embed_input(self, line_list): # Converts words to vector embeddings
final_list = [] # Stores embedded words
summary = None # Temp variable
word = None # Temp variable
temp = None # Temp variable
embed_dic = pickle.load(open(self.embed_dic_path, 'rb')) # Loading word2vec dictionary using Pickle
r_embed = pickle.load(open(self.random_vec, 'rb')) # Loading Random embedding
for i in range(len(line_list)): # Iterating over all the summaries
summary = line_list[i]
final_list.append([]) # Reserving space for curent summary
for j in range(len(summary)):
word = summary[j].lower()
if word in embed_dic: # Checking for existence of word in dictionary
final_list[-1].append(embed_dic[word])
else:
temp = r_embed[:] # Copying the values of the list
random.shuffle(temp) # Randomly shuffling the word embedding to make it unique
temp = np.asarray(temp, dtype=np.float32) # Converting to NumPy array
final_list[-1].append(temp)
return final_list
def prepare_data(self, task='train'): # Preparing all the data necessary
line_list, tag_list = None, None
'''
line_list is the list of rows, where each row is a list of all the words in a medical summary
Similar is the case for tag_list, except, it stores labels for each words
'''
if not os.path.exists(self.save_path):
os.mkdir(self.save_path) # Creating a new directory if it does not exist else reading previously saved data
if not os.path.exists(os.path.join(self.save_path, "label_dicts_bio.dat")):
self.initialize_labels() # Initialize label to index dictionaries
else:
self.labelDict, self.reverseDict = pickle.load(open(os.path.join(self.save_path, "label_dicts_bio.dat"), 'rb')) # Loading Label dictionaries
if not os.path.exists(os.path.join(self.save_path, "object_dict_bio_"+str(task)+".dat")):
data_dict = self.acquire_data(task) # Read data from file
line_list, tag_list = self.structure_data(data_dict) # Structures the data into proper form
line_list = self.embed_input(line_list) # Embeds input data (words) into embeddings
self.save_data([line_list, tag_list], os.path.join(self.save_path, "object_dict_bio_"+str(task)+".dat"))
else:
line_list, tag_list = pickle.load(open(os.path.join(self.save_path, "object_dict_bio_"+str(task)+".dat"), 'rb')) # Loading Data dictionary
return line_list, tag_list
def get_data(self, task='train'):
line_list, tag_list = self.prepare_data(task)
# Shuffling stories
story_idx = list(range(0, len(line_list)))
random.shuffle(story_idx)
num_batch = int(len(story_idx)/self.batch_size)
self.num_batches = num_batch
# Out Data
x_out = []
y_out = []
counter = 1
for i in story_idx:
if num_batch<=0:
break
x_out.append(line_list[i])
y_out.append(tag_list[i])
if counter % self.batch_size == 0:
counter = 0
# Padding and converting labels to one hot vectors
x_out_pad, y_out_pad = self.padding(x_out, y_out)
x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=torch.float32) # Converting from (batch_size x story_length x word size) to (story_length x batch_size x word size)
y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=torch.long) # Converting from (batch_size x story_length x 1) to (story_length x batch_size x 1)
x_out = []
y_out = []
num_batch -= 1
yield (self.num_batches - num_batch), x_out_array, y_out_array
counter += 1
def train_model(self):
# Here, the model is optimized using Cross Entropy Loss.
loss_list = []
seq_length = []
last_batch = 0
# self.load_model(1, 99, 13) # Loading Pre-Trained model to train further
for j in range(self.num_epoch):
for batch_num, X, Y in self.get_data(task='train'):
self.optimizer.zero_grad() # Making old gradients zero before calculating the fresh ones
self.machine.initialization(self.batch_size) # Initializing states
Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)
# Feeding the DNC network all the data first and then predicting output
# by giving zero vector as input and previous read states and hidden vector
# and thus training vector this way to give outputs matching the labels
embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation
temp_size = X.shape[0]
for i in range(temp_size):
Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1]) # Passing Embeddings from backwards
loss = self.calc_loss(Y_out, Y)
loss.backward()
self.clip_grads()
self.optimizer.step()
class_bag = self.calc_cost(Y_out, Y)
corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']
tot = class_bag['total']
loss_list += [loss.item()]
seq_length += [Y.shape[0]]
if (batch_num % self.save_batch) == 0:
self.save_model(j, batch_num)
last_batch = batch_num
print("Epoch: " + str(j) + "/" + str(self.num_epoch) + ", Batch: " + str(batch_num) + "/" + str(self.num_batches) + ", Loss: {0:.2f}, ".format(loss.item()) + \
"Batch Accuracy (Entity Prediction): {0:.2f} %, ".format((float(corr)/float(tot))*100.0) + "Batch Accuracy (Word Prediction): {0:.2f} %".format(class_bag['word_pred_acc']))
self.save_model(j, last_batch)
def test_model(self): # Testing the model
correct = 0
total = 0
result_dict = {}
result_dict['total_problem'] = 0 # Total labels in data
result_dict['total_test'] = 0 # Total labels in data
result_dict['total_treatment'] = 0 # Total labels in data
result_dict['correct_problem'] = 0 # Correctly classified labels
result_dict['correct_test'] = 0 # Correctly classified labels
result_dict['correct_treatment'] = 0 # Correctly classified labels
result_dict['false_positive_problem'] = 0 # False Positive labels
result_dict['false_positive_test'] = 0 # False Positive labels
result_dict['false_positive_treatment'] = 0 # False Positive labels
print("\n")
for batch_num, X, Y in self.get_data(task='test'):
self.machine.initialization(self.batch_size) # Initializing states
Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)
# Feeding the DNC network all the data first and then predicting output
# by giving zero vector as input and previous read states and hidden vector
# and thus training vector this way to give outputs matching the labels
embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation
temp_size = X.shape[0]
for i in range(temp_size):
Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1])
class_bag = self.calc_cost(Y_out, Y)
corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']
tot = class_bag['total']
result_dict['total_problem'] = result_dict['total_problem'] + class_bag['problem']
result_dict['total_test'] = result_dict['total_test'] + class_bag['test']
result_dict['total_treatment'] = result_dict['total_treatment'] + class_bag['treatment']
result_dict['correct_problem'] = result_dict['correct_problem'] + class_bag['problem_cor']
result_dict['correct_test'] = result_dict['correct_test'] + class_bag['test_cor']
result_dict['correct_treatment'] = result_dict['correct_treatment'] + class_bag['treatment_cor']
result_dict['false_positive_problem'] = result_dict['false_positive_problem'] + class_bag['problem_fp']
result_dict['false_positive_test'] = result_dict['false_positive_test'] + class_bag['test_fp']
result_dict['false_positive_treatment'] = result_dict['false_positive_treatment'] + class_bag['treatment_fp']
correct += corr
total += tot
print("Test Example " + str(batch_num) + "/" + str(self.num_batches) + " processed, Batch Accuracy: {0:.2f} %, ".format((float(corr)/float(tot))*100.0) + "Batch Accuracy (Word Prediction): {0:.2f} %".format(class_bag['word_pred_acc']))
result_dict['accuracy'] = (float(correct)/float(total))*100.0
result_dict = self.calc_metrics(result_dict)
print("\nOverall Entity Prediction Accuracy: {0:.2f} %".format(result_dict['accuracy']))
return result_dict
def calc_metrics(self, result_dict): # Calculates Certain Metrices
precision_p = float(result_dict['correct_problem'])/float(result_dict['correct_problem'] + result_dict['false_positive_problem']) # Problem Precision
recall_p = float(result_dict['correct_problem'])/float(result_dict['total_problem']) # Problem Recall
precision_ts = float(result_dict['correct_test'])/float(result_dict['correct_test'] + result_dict['false_positive_test']) # Test Precision
recall_ts = float(result_dict['correct_test'])/float(result_dict['total_test']) # Test Recall
precision_tr = float(result_dict['correct_treatment'])/float(result_dict['correct_treatment'] + result_dict['false_positive_treatment']) # Treatment Precision
recall_tr = float(result_dict['correct_treatment'])/float(result_dict['total_treatment']) # Treatment Recall
f_score_p = 2*precision_p*recall_p/(precision_p+recall_p) # Problem F1 Score
f_score_ts = 2*precision_ts*recall_ts/(precision_ts+recall_ts) # Test F1 Score
f_score_tr = 2*precision_tr*recall_tr/(precision_tr+recall_tr) # Treatment F1 Score
result_dict['problem_precision'] = precision_p
result_dict['problem_recall'] = recall_p
result_dict['problem_f1'] = f_score_p
result_dict['test_precision'] = precision_ts
result_dict['test_recall'] = recall_ts
result_dict['test_f1'] = f_score_ts
result_dict['treatment_precision'] = precision_tr
result_dict['treatment_recall'] = recall_tr
result_dict['treatment_f1'] = f_score_tr
result_dict['macro_average_f1'] = (f_score_p + f_score_ts + f_score_tr)/3.0 # Macro Average F1 Score
# Micro Average F1 Score
correct_sum = result_dict['correct_problem'] + result_dict['correct_test'] + result_dict['correct_treatment']
fp_sum = result_dict['false_positive_problem'] + result_dict['false_positive_test'] + result_dict['false_positive_treatment']
total_sum = result_dict['total_problem'] + result_dict['total_test'] + result_dict['total_treatment']
precision_avg = float(correct_sum)/float(correct_sum + fp_sum)
recall_avg = float(correct_sum)/float(total_sum)
result_dict['micro_average_f1'] = 2*precision_avg*recall_avg/(precision_avg+recall_avg)
return result_dict
def save_model(self, curr_epoch, curr_batch):
# Here 'start_epoch' and 'start_batch' params below are the 'epoch' and 'batch' number from which to start training after next model loading
# Note: It is recommended to start from the 'start_epoch' and not 'start_epoch' + 'start_batch', because batches are formed randomly
if not os.path.exists(os.path.join(self.model_path, self.name)):
os.mkdir(os.path.join(self.model_path, self.name))
state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1, 'start_batch': curr_batch + 1, 'state_dict': self.machine.state_dict(), 'optimizer_dic' : self.optimizer.state_dict()}
filename = self.model_path + self.name + "/" + self.name + "_" + str(curr_epoch) + "_" + str(curr_batch) + "_saved_model.pth.tar"
torch.save(state_dic, filename)
def load_model(self, option, epoch, batch):
path = self.model_path + self.name + "/" + self.name + "_" + str(epoch) + "_" + str(batch) + "_saved_model.pth.tar"
if option == 1: # Loading for training
checkpoint = torch.load(path)
self.machine.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_dic'])
else: # Loading for testing
checkpoint = torch.load(path)
self.machine.load_state_dict(checkpoint['state_dict'])
self.machine.eval() |
5,098 | 94cbd9554e3326897147dc417d9fc8f91974786a | #!/bin/env python3
"""
https://www.hackerrank.com/challenges/triangle-quest-2
INPUT:
integer N
where 0 < N < 10
OUTPUT:
print palindromic triangle of size N
e.g.for N=5
1
121
12321
1234321
123454321
"""
for i in range(1, int(input()) + 1):
j = 1
while j < i:
print(j,end='')
j += 1
while i > 0:
print(i,end='')
i -= 1
print()
|
5,099 | 849db3a92e0544661dd465b3e7f6949f8de5633b | from PyQt5.QtWidgets import *
from select_substituents_table import *
from save_selection_dialog import *
class SelectSubsDialog(QDialog):
def __init__(self, r_group):
super().__init__()
self.r_group = r_group
self.substituents = None
self.new_set_saved = False
self.setWindowTitle(f"Select Substituents for {self.r_group}")
self.instructions_label = QLabel("Click row heading to select functional group set. Ctrl + click or Shift + click to select multiple items. Double click functional group name to view SMILES.")
self.select_subs_table = SelectSubsTable()
self.confirm_button = QPushButton("Confirm Selection")
self.confirm_button.setEnabled(False)
self.save_as_set_button = QPushButton("Save Selection as Set")
self.save_as_set_button.setEnabled(False)
self.cancel_button = QPushButton("Cancel")
self.select_subs_button_layout = QHBoxLayout()
self.select_subs_button_layout.addWidget(self.confirm_button)
self.select_subs_button_layout.addWidget(self.save_as_set_button)
self.select_subs_button_layout.addWidget(self.cancel_button)
self.select_subs_layout = QVBoxLayout()
self.select_subs_layout.addWidget(self.instructions_label)
self.select_subs_layout.addWidget(self.select_subs_table)
self.select_subs_layout.addLayout(self.select_subs_button_layout)
self.setLayout(self.select_subs_layout)
self.select_subs_table.itemSelectionChanged.connect(self.enable_save_buttons)
self.confirm_button.clicked.connect(self.save_substituents)
self.save_as_set_button.clicked.connect(self.save_selection)
self.cancel_button.clicked.connect(self.close)
def enable_save_buttons(self):
self.confirm_button.setEnabled(True)
self.save_as_set_button.setEnabled(True)
def get_substituents(self):
self.substituents = list(dict.fromkeys([item.text() for item in self.select_subs_table.selectedItems()]))
def save_substituents(self):
self.get_substituents()
self.close()
def save_selection(self):
self.get_substituents()
save_selection_dialog = SaveSelectionDialog(self.substituents)
save_selection_dialog.exec_()
if save_selection_dialog.new_set_saved:
self.new_set_saved = True
self.close()
class SelectSubsForNewSetDialog(SelectSubsDialog):
def __init__(self):
super().__init__(r_group = "New Set")
self.confirm_button.setVisible(False)
class SelectSubsEditSetDialog(SelectSubsDialog):
def __init__(self, set_name):
super().__init__(r_group = None)
self.set_name = set_name
self.setWindowTitle(f"Select Groups for {self.set_name}")
self.save_as_set_button.setVisible(False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.