| seq_id (string) | text (string) | repo_name (string, nullable) | sub_path (string) | file_name (string) | file_ext (string, 18 classes) | file_size_in_byte (int64) | program_lang (string, 1 class) | lang (string, 93 classes) | doc_type (string, 1 class) | stars (int64, nullable) | dataset (string, 3 classes) | pt (string, 78 classes) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
36342252422 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Bandit LinUCB - Data Science Project
Group zambra
Created on Sun Nov 3 12:09:56 2019
@author: DANG
"""
from LinUCB_disjoint import LinUCB_disjoint
from LinUCB_hybride import LinUCB_hybrid
from LinUCB_dataPre import MovieLensData
import matplotlib.pyplot as plt
import time
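# Plot one cumulative-regret curve per tested hyper-parameter value, labelled with that value.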
def plot_regret_comp(list_regrets, approach, param_name, list_param):
for i in range(len(list_regrets)):
plt.plot(list_regrets[i], label=param_name+" = "+str(list_param[i]))
    plt.title(approach + ' cumulative regret for different ' + param_name)
plt.xlabel('T')
plt.ylabel('regret')
plt.legend()
plt.show()
if __name__ == '__main__':
'''find best parameters for linUCB'''
if 'data' not in locals():
print('preparing data')
data = MovieLensData()
niter = 500
alpha = 1.6
delta = 0 # noise
users = [0]
#for linUCB disjoint
lambda_s = [1., 1.5, 1.8]
#for linUCB hybrid
lambda_theta = 1.
lambda_betas = [1.0, 1.2, 1.5]
approach = 'hybrid'
list_regrets = []
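    # Sweep the candidate regularisation values: fit one LinUCB model per value for niter rounds and record its cumulative regret.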
if approach == 'disjoint':
for lambda_ in lambda_s:
start = time.time()
lin_ucb = LinUCB_disjoint(data, alpha, lambda_, delta)
regrets_dis, r_dis, films_rec_dis, r_taken_mean_dis, r_taken_ucb_dis = lin_ucb.fit(users, niter)
list_regrets.append(regrets_dis)
end = time.time()
print("LinUCB disjoint time used: {}".format(end - start))
elif approach == 'hybrid':
for lambda_beta in lambda_betas:
start = time.time()
lin_ucb = LinUCB_hybrid(data, alpha, lambda_theta, lambda_beta, delta)
regrets_hyb, r_hyb, films_rec_hyb, r_taken_mean_hyb, r_taken_ucb_hyb, r_esti_mean_hyb, r_ucb_hyb = lin_ucb.fit(users, niter)
list_regrets.append(regrets_hyb)
end = time.time()
print("LinUCB hybride time used: {}".format(end - start))
else:
print('No corresponding approach')
        exit()
    plot_regret_comp(list_regrets, approach, r'$\lambda$', lambda_betas if approach == 'hybrid' else lambda_s) | minhparis/linucb | LinUCB_last_week/LinUCB_param_search.py | LinUCB_param_search.py | py | 2,140 | python | en | code | 2 | github-code | 13 |
12229081770 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import get_object_or_404,render,redirect
from django.http import HttpResponse,JsonResponse,HttpResponseRedirect
from django.contrib import messages
import json
import random
import datetime
# from datetime import date, timedelta
import time
from django.template.loader import render_to_string
from django.shortcuts import render,HttpResponse
from django.views import View
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView,DeleteView,FormView
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.decorators import login_required
from django.contrib import admin
from django.contrib.auth.models import Permission
from django.utils.safestring import mark_safe
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Count, Q
from accounts.models import User,MyRoles
from amrs.models import HdbFlowDataDay,HdbFlowDataMonth,Bigmeter,Alarm
from core.models import Organization,DMABaseinfo
from dmam.utils import merge_values, merge_values_with,merge_values_to_dict
from ggis.models import FenceDistrict
# from django.core.urlresolvers import reverse_lazy
from django.utils.encoding import escape_uri_path
from .resources import BigmeterRTSelectResource
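# Build per-DMA summary rows (today's usage, current and previous month usage, month-over-month ratio) for the monitoring map pages.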
def dmastasticinfo():
organ = Organization.objects.first()
organs = organ.get_descendants(include_self=True)
dmas = None
for o in organs:
if dmas is None:
dmas = o.dma.all()
else:
dmas |= o.dma.all()
data = []
for dma in dmas:
dmastation = dma.dmastation_set.first()
if dmastation is None:
continue
commaddr = dmastation.station_id
dmaflow = 0
month_sale = 0
lastmonth_sale = 0
bili = 0
today = datetime.date.today()
today_str = today.strftime("%Y-%m-%d")
today_flow = HdbFlowDataDay.objects.filter(commaddr=commaddr).filter(hdate=today_str)
if today_flow.exists():
dmaflow = today_flow.first().dosage
month_str = today.strftime("%Y-%m")
month_flow = HdbFlowDataMonth.objects.filter(commaddr=commaddr).filter(hdate=month_str)
if month_flow.exists():
month_sale = month_flow.first().dosage
# lastmonth = datetime.datetime(year=today.year,month=today.month-1,day=1)
# now = datetime.datetime.now()
# lastmonth = now + dateutil.relativedelta.relativedelta(months=-1)
lastmonth = datetime.date.today().replace(day=1) - datetime.timedelta(days=1)
lastmonth_str = lastmonth.strftime("%Y-%m")
lastmonth_flow = HdbFlowDataMonth.objects.filter(commaddr=commaddr).filter(hdate=lastmonth_str)
if lastmonth_flow.exists():
lastmonth_sale = lastmonth_flow.first().dosage
if float(month_sale) > 0 and float(lastmonth_sale) > 0:
bili = (float(month_sale) - float(lastmonth_sale) ) / float(lastmonth_sale)
data.append(
{
"dma_name":dma.dma_name,
"belongto":dma.belongto.name,
"dma_level":"二级",
"dma_status":"在线",
"dmaflow":round(float(dmaflow),2),
"month_sale":round(float(month_sale),2),
"lastmonth_sale":round(float(lastmonth_sale),2),
"bili":round(bili,2)
}
)
return data
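# AJAX endpoint: return the real-time data of one DMA and, when a fence exists, its geometry as a GeoJSON feature collection.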
def maprealdata(request):
dma_no = request.POST.get("dma_no") or None
result = {}
if dma_no:
dma = DMABaseinfo.objects.get(dma_no=dma_no)
dmartdata = dma.dma_map_realdata()
result["success"] = True
result["dmartdata"] = dmartdata
f=FenceDistrict.objects.filter(dma_no=dma_no)
if f.exists():
feature = f[0].featureCollection()
result["feature"] = feature
# result["feature"] = f[0].shape.geomjson
else:
result["success"] = False
return HttpResponse(json.dumps(result))
return HttpResponse(json.dumps(result))
class MapMonitorView(LoginRequiredMixin,TemplateView):
template_name = "monitor/mapmonitor.html"
def get_context_data(self, *args, **kwargs):
context = super(MapMonitorView, self).get_context_data(*args, **kwargs)
context["page_menu"] = "数据监控"
# context["page_submenu"] = "组织和用户管理"
context["page_title"] = "地图监控"
stat_list = dmastasticinfo()
statsinfo = json.dumps({"statsinfo":stat_list})
context["dmastasticinfo"] = statsinfo
return context
class MapMonitorView2(LoginRequiredMixin,TemplateView):
template_name = "monitor/mapmonitor.html"
def get_context_data(self, *args, **kwargs):
context = super(MapMonitorView2, self).get_context_data(*args, **kwargs)
context["page_menu"] = "数据监控"
# context["page_submenu"] = "组织和用户管理"
context["page_title"] = "地图监控"
stat_list = dmastasticinfo()
statsinfo = json.dumps({"statsinfo":stat_list})
context["dmastasticinfo"] = statsinfo
return context
class MapStationView(LoginRequiredMixin,TemplateView):
template_name = "monitor/mapstation.html"
def get_context_data(self, *args, **kwargs):
context = super(MapStationView, self).get_context_data(*args, **kwargs)
context["page_menu"] = "数据监控"
# context["page_submenu"] = "组织和用户管理"
context["page_title"] = "站点监控"
stat_list = dmastasticinfo()
statsinfo = json.dumps({"statsinfo":stat_list})
context["dmastasticinfo"] = statsinfo
return context
# Return the station info shown under the organization tree on the station map page
@login_required
def getmapstationlist(request):
print('getmapstationlist:',request.POST)
groupName = request.POST.get("groupName")
user = request.user
organs = user.belongto
print(organs,type(organs))
if groupName == '':
selectedgroup = Organization.objects.filter(cid=organs.cid).values().first()
else:
selectedgroup = Organization.objects.filter(cid=groupName).values().first()
stations = user.belongto.station_list_queryset('')
if groupName != "":
stations = stations.filter(belongto__cid=groupName)
    # Fetch all required data in one query to reduce database round trips
stations_value_list = stations.values_list('meter__simid__simcardNumber','username','belongto__name',
'meter__serialnumber','meter__metertype','meter__dn','lng','lat')
bgms = Bigmeter.objects.all().values_list('commaddr','commstate','fluxreadtime','flux','totalflux','pressure','signlen')
def append_data(s):
        # query the bigmeter record matching this station's commaddr
commaddr = s[0]
b=None
for b0 in bgms:
if b0[0] == commaddr:
b = b0
if s[5] is None:
return None
if s[6] is None:
return None
if b:
return {
"stationname":s[1],
"belongto":s[2],
"serialnumber":s[3],#
"metertype":s[4],
"dn":s[5],
"lng":s[6],
"lat":s[7],
"status":"在线" if b[1] == '1' else "离线",
"readtime":b[2] ,
"flux":round(float(b[3]),2) if b[3] else '',
"totalflux":b[4],
"press":round(float(b[5]),2) if b[5] else '',
"signal":round(float(b[6]),2) if b[6] else '',
}
else:
return None
data = []
# s:station b:bigmeter
for s in stations_value_list:
ret=append_data(s)
if ret is not None:
data.append(ret)
entminfo = {
"coorType":selectedgroup["coorType"],
"longitude":selectedgroup["longitude"],
"latitude":selectedgroup["latitude"],
"zoomIn":selectedgroup["zoomIn"],
"islocation":selectedgroup["islocation"],
"adcode":selectedgroup["adcode"],
"districtlevel":selectedgroup["districtlevel"],
}
result = dict()
result["success"] = "true"
result["obj"] = data
result["entminfo"] = entminfo
return HttpResponse(json.dumps(result))
class RealTimeDataView(LoginRequiredMixin,TemplateView):
template_name = "monitor/realtimedata.html"
def get_context_data(self, *args, **kwargs):
context = super(RealTimeDataView, self).get_context_data(*args, **kwargs)
context["page_menu"] = "数据监控"
# context["page_submenu"] = "组织和用户管理"
context["page_title"] = "实时数据"
stations = self.request.user.belongto.station_list_queryset('')
total_station_num = stations.count()
online_station = stations.filter(meter__state=1)
online_station_num = online_station.count()
biguser_station = stations.filter(biguser=1)
biguser_station_num = biguser_station.count()
focus_station = stations.filter(focus=1)
focus_station_num = focus_station.count()
alarm_station_num = 0
context["total_station_num"] = total_station_num
context["online_station_num"] = online_station_num
context["offline_station_num"] = total_station_num - online_station_num
context["biguser_station_num"] = biguser_station_num
context["focus_station_num"] = focus_station_num
context["alarm_station_num"] = alarm_station_num
return context
class RtdataAmarm(TemplateView):
# model = VWatermeter
template_name = "monitor/showalarm.html"
def get_object(self):
return Bigmeter.objects.get(commaddr=self.kwargs["pk"])
def get_context_data(self, *args, **kwargs):
context = super(RtdataAmarm, self).get_context_data(*args, **kwargs)
obj = self.get_object()
# print("dasfefaesdfsdf----",obj.numbersth,obj.id,obj.serialnumber)
context["object"] = self.get_object()
context["numbersth"] = obj.username
context["wateraddr"] = obj.commaddr
return context
# Return the station list for the real-time data page
def stationlist_old(request):
draw = 1
length = 0
start=0
if request.method == "GET":
draw = int(request.GET.get("draw", 1))
length = int(request.GET.get("length", 10))
start = int(request.GET.get("start", 0))
search_value = request.GET.get("search[value]", None)
# order_column = request.GET.get("order[0][column]", None)[0]
# order = request.GET.get("order[0][dir]", None)[0]
groupName = request.GET.get("groupName")
simpleQueryParam = request.POST.get("simpleQueryParam")
# print("simpleQueryParam",simpleQueryParam)
if request.method == "POST":
draw = int(request.POST.get("draw", 1))
length = int(request.POST.get("length", 10))
start = int(request.POST.get("start", 0))
pageSize = int(request.POST.get("pageSize", 10))
search_value = request.POST.get("search[value]", None)
order_column = int(request.POST.get("order[0][column]", None))
order = request.POST.get("order[0][dir]", None)
groupName = request.POST.get("groupName")
districtId = request.POST.get("districtId")
simpleQueryParam = request.POST.get("simpleQueryParam")
# print(request.POST.get("draw"))
# print("groupName",groupName)
# print("districtId:",districtId)
# print("post simpleQueryParam",simpleQueryParam)
user = request.user
organs = user.belongto
stations = user.station_list_queryset(simpleQueryParam)
pressures = user.pressure_list_queryset(simpleQueryParam)
print("pressures ",pressures)
if groupName != "":
stations = stations.filter(belongto__uuid=groupName)
pressures = pressures.filter(belongto__uuid=groupName)
    # Fetch all required data in one query to reduce database round trips
stations_value_list = stations.values_list('meter__simid__simcardNumber','username','belongto__name','meter__serialnumber','meter__dn')
pressures_value_list = pressures.values_list('simid__simcardNumber','username','belongto__name','serialnumber','dn')
bgms = Bigmeter.objects.all().order_by('-fluxreadtime').values_list('commaddr','commstate','fluxreadtime','pickperiod','reportperiod',
'flux','plustotalflux','reversetotalflux','pressure','meterv','gprsv','signlen')
alams_sets = Alarm.objects.values('commaddr').annotate(Count('id'))
alarm_dict = {}
for alm in alams_sets:
alarm_dict[alm['commaddr']] = alm['id__count']
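    # alarm_dict: communication address -> number of alarms recorded for that station.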
    # Communication addresses of the stations this user is permitted to see
commaddrs = [s[0] for s in stations_value_list ]
commaddrs += [s[0] for s in pressures_value_list]
# print("commaddrs",commaddrs)
tmp_bgms = [b for b in bgms if b[0] in commaddrs]
# print("tmp_bgms",tmp_bgms)
# print("stations",stations)
def bgm_data(b):
        # look up the station matching this bigmeter's commaddr
commaddr = b[0]
alarm_count = alarm_dict.get(commaddr,0)
# print('alarm_count',alarm_count,commaddr)
s=None
for s0 in stations_value_list:
if s0[0] == commaddr:
s=s0
# pressure
for s0 in pressures_value_list:
if s0[0] == commaddr:
s=s0
# try:
# s = stations.select_related("meter__simid").select_related("belongto").get(meter__simid__simcardNumber=commaddr) #meter__simid__simcardNumber
# except :
# s = None
if s:
return {
"stationname":s[1],
"belongto":s[2],
"serialnumber":s[3],#
"alarm":alarm_count,
"status":b[1],
"dn":s[4],
"readtime":b[2] ,
"collectperiod":b[3],
"updataperiod":b[4],
"influx":round(float(b[5]),2) if b[5] else '',
"plusflux":round(float(b[6]),2) if b[6] else '',
"revertflux":round(float(b[7]),2) if b[7] else '',
"press":round(float(b[8]),2) if b[8] else '',
"baseelectricity":round(float(b[9]),2) if b[9] else '',
"remoteelectricity":round(float(b[10]),2) if b[10] else '',
"signal":round(float(b[11]),2) if b[11] else '',
}
else:
return None
data = []
for b in tmp_bgms[start:start+length]: #[start:start+length]
ret=bgm_data(b)
if ret is not None:
data.append(ret)
recordsTotal = stations.count()
# recordsTotal = bgms.count()
result = dict()
result["records"] = data
result["draw"] = draw
result["success"] = "true"
result["pageSize"] = pageSize
result["totalPages"] = recordsTotal/pageSize
result["recordsTotal"] = recordsTotal
result["recordsFiltered"] = recordsTotal
result["start"] = 0
result["end"] = 0
# print(draw,pageSize,recordsTotal/pageSize,recordsTotal)
return HttpResponse(json.dumps(result))
# Return the station list for the real-time data page (new implementation)
def stationlist(request):
draw = 1
length = 0
start=0
if request.method == "GET":
draw = int(request.GET.get("draw", 1))
length = int(request.GET.get("length", 10))
start = int(request.GET.get("start", 0))
search_value = request.GET.get("search[value]", None)
# order_column = request.GET.get("order[0][column]", None)[0]
# order = request.GET.get("order[0][dir]", None)[0]
groupName = request.GET.get("groupName")
simpleQueryParam = request.POST.get("simpleQueryParam")
# print("simpleQueryParam",simpleQueryParam)
if request.method == "POST":
draw = int(request.POST.get("draw", 1))
length = int(request.POST.get("length", 10))
start = int(request.POST.get("start", 0))
pageSize = int(request.POST.get("pageSize", 10))
search_value = request.POST.get("search[value]", None)
order_column = int(request.POST.get("order[0][column]", None))
order = request.POST.get("order[0][dir]", None)
groupName = request.POST.get("groupName")
districtId = request.POST.get("districtId")
simpleQueryParam = request.POST.get("simpleQueryParam")
# print(request.POST.get("draw"))
# print("groupName",groupName)
# print("districtId:",districtId)
# print("post simpleQueryParam",simpleQueryParam)
user = request.user
organs = user.belongto
stations = user.station_list_queryset(simpleQueryParam)
pressures = user.pressure_list_queryset(simpleQueryParam)
if groupName != "":
stations = stations.filter(belongto__uuid=groupName)
pressures = pressures.filter(belongto__uuid=groupName)
station_values = stations.values('meter__simid__simcardNumber','username','belongto__name','meter__serialnumber','meter__dn')
merged_station = merge_values_to_dict(station_values,'meter__simid__simcardNumber')
    # Fetch all required data in one query to reduce database round trips
pressures_values = pressures.values('simid__simcardNumber','username','belongto__name','serialnumber','dn')
merged_pressure = merge_values_to_dict(pressures_values,'simid__simcardNumber')
bgms = Bigmeter.objects.all().order_by('-fluxreadtime').values('commaddr','commstate','fluxreadtime','pickperiod','reportperiod',
'flux','plustotalflux','reversetotalflux','pressure','meterv','gprsv','signlen','pressurereadtime')
merged_bgms = merge_values_to_dict(bgms,'commaddr')
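    # Join stations / pressure points with the latest meter readings in memory, keyed by communication address.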
# alams_sets = Alarm.objects.values("commaddr").annotate(Count('id'))
# alarm_dict = {}
# for alm in alams_sets:
# alarm_dict[alm['commaddr']] = alm['id__count']
    # Communication addresses of the stations this user is permitted to see
data = []
for b in merged_bgms.keys(): #[start:start+length]
if b in merged_station.keys():
alarm_count = 0 #alarm_dict.get(b,0)
# alarm_count = [a['alm_count'] for a in alams_sets if a['commaddr'] == b ]
# alarm_item = list(filter(lambda alarm: alarm[0] == b, alarm_all))[0]
data.append({
"stationname":merged_station[b]['username'],
"belongto":merged_station[b]['belongto__name'],
"serialnumber":merged_station[b]['meter__serialnumber'],#
"alarm":alarm_count,
"status":merged_bgms[b]['commstate'],
"dn":merged_station[b]['meter__dn'],
"readtime":merged_bgms[b]['fluxreadtime'] ,
"collectperiod":merged_bgms[b]['pickperiod'],
"updataperiod":merged_bgms[b]['reportperiod'],
"influx":round(float(merged_bgms[b]['flux']),2) if merged_bgms[b]['flux'] else '',
"plusflux":round(float(merged_bgms[b]['plustotalflux']),2) if merged_bgms[b]['plustotalflux'] else '',
"revertflux":round(float(merged_bgms[b]['reversetotalflux']),2) if merged_bgms[b]['reversetotalflux'] else '',
"press":round(float(merged_bgms[b]['pressure']),2) if merged_bgms[b]['pressure'] else '',
"baseelectricity":round(float(merged_bgms[b]['meterv']),2) if merged_bgms[b]['meterv'] else '',
"remoteelectricity":round(float(merged_bgms[b]['gprsv']),2) if merged_bgms[b]['gprsv'] else '',
"signal":round(float(merged_bgms[b]['signlen']),2) if merged_bgms[b]['signlen'] else '',
})
if b in merged_pressure.keys():
data.append({
"stationname":merged_pressure[b]['username'],
"belongto":merged_pressure[b]['belongto__name'],
"serialnumber":merged_pressure[b]['serialnumber'],#
"alarm":0,
"status":merged_bgms[b]['commstate'],
"dn":merged_pressure[b]['dn'],
"readtime":merged_bgms[b]['pressurereadtime'] ,
"collectperiod":merged_bgms[b]['pickperiod'],
"updataperiod":merged_bgms[b]['reportperiod'],
"influx":'-',
"plusflux":'-',
"revertflux":'-',
"press":round(float(merged_bgms[b]['pressure']),3) if merged_bgms[b]['pressure'] else '',
"baseelectricity":round(float(merged_bgms[b]['meterv']),2) if merged_bgms[b]['meterv'] else '',
"remoteelectricity":round(float(merged_bgms[b]['gprsv']),2) if merged_bgms[b]['gprsv'] else '',
"signal":round(float(merged_bgms[b]['signlen']),2) if merged_bgms[b]['signlen'] else '',
})
recordsTotal = stations.count() + pressures.count()
# recordsTotal = bgms.count()
result = dict()
result["records"] = data[start:start+length]
result["draw"] = draw
result["success"] = "true"
result["pageSize"] = pageSize
result["totalPages"] = recordsTotal/pageSize
result["recordsTotal"] = recordsTotal
result["recordsFiltered"] = recordsTotal
result["start"] = 0
result["end"] = 0
# print(draw,pageSize,recordsTotal/pageSize,recordsTotal)
return HttpResponse(json.dumps(result))
class RealcurlvView(LoginRequiredMixin,TemplateView):
template_name = "monitor/realcurlv.html"
def get_context_data(self, *args, **kwargs):
context = super(RealcurlvView, self).get_context_data(*args, **kwargs)
context["page_title"] = "实时曲线"
context["page_menu"] = "数据监控"
return context
class VehicleView(LoginRequiredMixin,TemplateView):
template_name = "monitor/vehicle.html"
def get_context_data(self, *args, **kwargs):
context = super(VehicleView, self).get_context_data(*args, **kwargs)
context["page_title"] = "车辆监控"
context["page_menu"] = "数据监控"
return context
class VedioView(LoginRequiredMixin,TemplateView):
template_name = "monitor/vedio.html"
def get_context_data(self, *args, **kwargs):
context = super(VedioView, self).get_context_data(*args, **kwargs)
context["page_title"] = "实时视频"
context["page_menu"] = "数据监控"
return context
class SecondwaterView(LoginRequiredMixin,TemplateView):
template_name = "monitor/secondwater.html"
def get_context_data(self, *args, **kwargs):
context = super(SecondwaterView, self).get_context_data(*args, **kwargs)
context["page_title"] = "二次供水"
context["page_menu"] = "数据监控"
return context
# Return secondary water-supply station info
@login_required
def getmapsecondwaterlist(request):
print('getmapsecondwaterlist:',request.POST)
groupName = request.POST.get("groupName")
user = request.user
organs = user.belongto
print(organs,type(organs))
if groupName == '':
selectedgroup = Organization.objects.filter(cid=organs.cid).values().first()
else:
selectedgroup = Organization.objects.filter(cid=groupName).values().first()
secondwaters = user.secondwater_list_queryset('')
if groupName != "":
secondwaters = secondwaters.filter(belongto__cid=groupName)
    # Fetch all required data in one query to reduce database round trips
stations_value_list = secondwaters.values('name','belongto__name','lng','lat','coortype')
def append_data(s):
return {
"stationname":s["name"],
"belongto":s["belongto__name"],
"coortype":s["coortype"],
"lng":s["lng"],
"lat":s["lat"],
"status":"在线" ,
"readtime":"13:14" ,
"flux":'3.14',
"press_out":"1",
"press_in":'',
}
data = []
# s:station b:bigmeter
for s in stations_value_list:
ret=append_data(s)
if ret is not None:
data.append(ret)
entminfo = {
"coorType":selectedgroup["coorType"],
"longitude":selectedgroup["longitude"],
"latitude":selectedgroup["latitude"],
"zoomIn":selectedgroup["zoomIn"],
"islocation":selectedgroup["islocation"],
"adcode":selectedgroup["adcode"],
"districtlevel":selectedgroup["districtlevel"],
}
result = dict()
result["success"] = "true"
result["obj"] = data
result["entminfo"] = entminfo
return HttpResponse(json.dumps(result))
def exportbyselect(request):
'''
    Export the real-time data list to Excel.
'''
groupName = request.GET.get("groupName")
groupType = request.GET.get("groupType")
districtId = request.GET.get("districtId")
selectCommunity = request.GET.get("selectCommunity")
selectBuilding = request.GET.get("selectBuilding")
selectTreeType = request.GET.get("selectTreeType")
simpleQueryParam = request.GET.get("simpleQueryParam")
organ = request.user.belongto
if groupName:# and groupType == 'group':
try:
organ = Organization.objects.get(uuid=groupName)
except:
pass
pressure_queryset = organ.pressure_list_queryset('').filter(amrs_pressure__fluxreadtime__isnull=False)#.order_by('-amrs_pressure__fluxreadtime')
station_queryset = organ.station_list_queryset('').filter(amrs_bigmeter__fluxreadtime__isnull=False)#.order_by('-amrs_bigmeter__fluxreadtime')
if simpleQueryParam:
station_queryset = station_queryset.filter(
Q(amrs_bigmeter__username__icontains=simpleQueryParam)|
Q(amrs_bigmeter__commaddr__icontains=simpleQueryParam)|
Q(amrs_bigmeter__serialnumber__icontains=simpleQueryParam)
# Q(imei__icontains=query)
).distinct()
pressure_queryset = pressure_queryset.filter(
Q(amrs_pressure__username__icontains=simpleQueryParam)|
Q(amrs_pressure__commaddr__icontains=simpleQueryParam)|
Q(amrs_pressure__serialnumber__icontains=simpleQueryParam)
# Q(imei__icontains=query)
).distinct()
queryset_list = [s.amrs_bigmeter for s in station_queryset]
queryset_list += [s.amrs_pressure for s in pressure_queryset]
queryset_list = sorted(queryset_list, key=lambda x: x.fluxreadtime)
watermeter_resource = BigmeterRTSelectResource()
# dataset = watermeter_resource.export(queryset_list)
HEADERS = []
    # 'name' is the parameter taken from the routing URL
# resource_name = '%s_Resource()' % name
# export_resource = eval(resource_name)
HEADERS = watermeter_resource.get_export_headers()
dataset = watermeter_resource.export(queryset_list)
import xlwt
book = xlwt.Workbook()
sheet = book.add_sheet('Sheet1') # 创建一个sheet
    # ----- style settings -----
    alignment = xlwt.Alignment()  # centred alignment
    alignment.horz = xlwt.Alignment.HORZ_CENTER
    alignment.vert = xlwt.Alignment.VERT_CENTER
    # header style
    header = xlwt.XFStyle()
    header.alignment = alignment
    header.font.height = 200
    header.font.name = '宋体'
    header.font.bold = True  # bold
    # content style
    style = xlwt.XFStyle()  # create the style
    style.alignment = alignment  # centre the text
    style.font.height = 200  # font size 200 (default is 10pt)
    style.font.name = '宋体'  # SimSun font
    style.font.colour_index = 0x77  # font colour
    # sequence-number column
    sheet.write(0,0,"序号",header)
    # header titles, written with the header style
for tag in range(0, len(HEADERS)):
sheet.write(0, tag+1, HEADERS[tag], header)
        # ---------- set column width ----------
col = sheet.col(tag)
if 420 * (len(HEADERS[tag]) + 2) > 65536:
col.width = 65000
else:
col.width = 420 * (len(HEADERS[tag]) + 2)
    # write the data rows
if dataset:
for line in range(0, len(dataset)):
sheet.write(line + 1, 0, line+1, style) #seq
for col in range(0, len(HEADERS)):
sheet.write(line + 1, col+1, dataset[line][col], style)
length = len(HEADERS[col])
for row in range(0, len(dataset)):
if len(str(dataset[row][col])) >= len(str(dataset[row - 1][col])) and len(
str(dataset[row][col])) > length:
length = len(str(dataset[row][col]))
                # set the column width
colwidth = sheet.col(col+1)
if 420 * (length + 2) > 65536:
colwidth.width = 65000
else:
colwidth.width = 420 * (length + 2)
response = HttpResponse(content_type='application/vnd.ms-excel')
response['Content-Disposition'] = 'attachment; filename='+ escape_uri_path("实时数据导出.xls")
# response['Content-Disposition'] = 'attachment; filename=%s.xls' % urlquote(name)
book.save(response)
return response
class RtdShowInfoView(TemplateView):
'''
    Real-time data page: render detail info for the station selected by user name.
'''
# model = VWatermeter
template_name = "monitor/realtimedata-showinfo.html"
def get_object(self):
return Bigmeter.objects.get(commaddr=self.kwargs["pk"])
def get_context_data(self, *args, **kwargs):
context = super(RtdShowInfoView, self).get_context_data(*args, **kwargs)
print(args,kwargs)
context["page_title"] = "实时数据"
context["page_menu"] = "用户信息"
obj = self.get_object()
# print("dasfefaesdfsdf----",obj.numbersth,obj.id,obj.serialnumber)
context["object"] = self.get_object()
context["numbersth"] = obj.username
context["image1"] = '/media/'+ str(obj.station.image1) if obj.station.image1 else ""
context["image2"] = '/media/'+ str(obj.station.image2) if obj.station.image2 else ""
context["image3"] = '/media/'+ str(obj.station.image3) if obj.station.image3 else ""
context["image4"] = '/media/'+ str(obj.station.image4) if obj.station.image4 else ""
context["image5"] = '/media/'+ str(obj.station.image5) if obj.station.image5 else ""
return context | apengok/bsc2000 | monitor/views.py | views.py | py | 30,903 | python | en | code | 1 | github-code | 13 |
71142729299 | import pandas as pd
import numpy as np
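# Builds a small random "KPI tree": metrics are laid out in rows, and each metric is randomly linked to up to two parents in the row above and up to three children in the row below.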
class Metric:
def __init__(self, id):
self.id = id
self.name = 'metric name - ' + str(id)
self.parents = []
self.childs = []
def show_metric(self):
print(self.id)
class Tree(Metric):
def __init__(self, rows, elements_size):
self.r = rows
self.elements_size = elements_size
self.rows = []
self.cells = []
self.all_paths = []
def generate_metric_rows(self):
for row in range(1, self.r+1):
self.rows.append(row)
def generate_random_metrics(self):
for row in self.rows:
for element in range(1, self.elements_size+1):
path_id = int(str(row) + str(element))
self.all_paths.append(Metric(id=path_id))
self.cells.append(path_id)
def randomize_network(self):
for path in self.all_paths:
            # keep only cells in the adjacent rows as candidate parents / children for this metric
filtered_parents = list(filter(lambda cell: int(
str(cell)[0:1]) == int(str(path.id)[0:1])+1, self.cells))
filtered_childs = list(filter(lambda cell: int(
str(cell)[0:1]) == int(str(path.id)[0:1])-1, self.cells))
            # pick up to two parents from the row above
for i in range(2):
if filtered_parents:
parent = np.random.choice(
filtered_parents).item()
path.parents.append(parent)
filtered_parents.remove(parent)
i += 1
            # pick up to three children from the row below
for i in range(3):
if filtered_childs:
child = np.random.choice(filtered_childs).item()
path.childs.append(child)
filtered_childs.remove(child)
i += 1
tree = Tree(5, 5)
tree.generate_metric_rows()
tree.generate_random_metrics()
tree.randomize_network()
if __name__ == '__main__':
Tree(5, 5)
print(tree)
| AndreMaciel66/fake-neural-network | app/fake_kpi_tree_generator.py | fake_kpi_tree_generator.py | py | 2,061 | python | en | code | 0 | github-code | 13 |
1280607001 | import glob
import os
import openpyxl
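# Bulk find-and-replace across every .xlsx workbook in the target folder: on each target sheet, scan row 10 and overwrite cells whose value matches a target item.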
# (1) Directory containing the target files
path = '../excel'
# (2) Target file type
fileType = '*.xlsx'
# (3) Sheet names in which to perform the replacement
sheetName = ['表紙']
# (4) Cell values to be replaced
tgtItem = ['置き換え対象データ']
# (5) Replacement value
changDate = '置き換え後データ'
# Print the paths of the Excel files found under the target directory (1)
print("■ Files to search")
print(glob.glob(os.path.join(path,fileType )))
# Loop over every .xlsx file under the target directory (1)
for book in glob.glob(os.path.join(path, fileType)):
print("■対象ファイル")
print(book)
bookFlg=0
    # Open the workbook
    # openpyxl.load_workbook('<path to the Excel file>')
    actBook = openpyxl.load_workbook(book)
    # Loop over every sheet in the workbook
for actSheetName in actBook.sheetnames:
print("■対象シート")
print(actSheetName)
count = 0
        # Check whether this sheet name is one of the target sheets (3)
if actSheetName in sheetName:
            # Get the last used row of the sheet
maxRow = actBook[actSheetName].max_row
            # Get the sheet object
            # workbook[sheet name]
actSheet = actBook[actSheetName]
            # Loop over the row range that may contain the target items
            # for row in sheet.iter_rows(min_row=<start row>, max_row=<end row>)
for row in actSheet.iter_rows(min_row=10,max_row=10):
                # for cell in row
for cellRow in row:
                    # Check whether the cell value is one of the target items (4)
if cellRow.value in tgtItem:
                        # cell.value = replacement text
cellRow.value=changDate
count+=1
bookFlg=1
print(str(count) + "件置換しました。")
    # Save the workbook
if bookFlg == 1:
        # workbook.save(<path to the Excel file>)
actBook.save(book)
else:
        actBook.close()
| hukuikoki/work-efficiency | cellUpdate.py | cellUpdate.py | py | 2,190 | python | ja | code | 0 | github-code | 13 |
15864162055 | #!/usr/bin/env python
from pylab import *
import wave
import numpy as np
from scipy import signal
import sys
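# Usage: python spectro.py <audio.wav> -- loads the WAV file, normalises the samples to [-1, 1] and displays a spectrogram.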
audiofile = sys.argv[1]
#load audio file
waveFile = wave.open(audiofile, 'r')
#get length
length = waveFile.getnframes()
#get sample rate
fs = waveFile.getframerate()
#get block size
blocksize = waveFile.getsampwidth()
#read audio byte array as string
data = waveFile.readframes(length)
#convert from string array to numeric 16 bit byte array
vals = np.fromstring(data, dtype='h')
#convert to float with range -1 to +1
floats = vals/32768.0
NFFT = 1024 # the length of the windowing segments
Fs = fs # the sampling frequency
# Pxx is the segments x freqs array of instantaneous power, freqs is
# the frequency vector, bins are the centers of the time bins in which
# the power is computed, and im is the matplotlib.image.AxesImage
# instance
cmap = plt.cm.jet
Pxx, freqs, bins, im = specgram(floats, NFFT=NFFT, Fs=Fs, noverlap=900,cmap=cmap)
show() | justinsalamon/sonyc-citizensound | processing/spectro.py | spectro.py | py | 974 | python | en | code | 0 | github-code | 13 |
17329631009 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A common training and evaluation runner to allow for easy and consistent model creation and evaluation
"""
__author__ = "John Hoff"
__email__ = "john.hoff@braindonor.net"
__copyright__ = "Copyright 2019, John Hoff"
__license__ = "Creative Commons Attribution-ShareAlike 4.0 International License"
__version__ = "1.0"
import pandas as pd
from collections import Counter
from skopt import BayesSearchCV
from sklearn.base import clone
from sklearn.externals.joblib import Parallel, delayed
from sklearn.model_selection import StratifiedKFold, train_test_split
from sklearn.utils import shuffle
from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier
def crossfold_classifier(estimator, transformer, x_train, y_train, train_index, test_index,
record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state):
"""
    This method allows for training to be done using the joblib parallelism in scikit-learn. Overall a hacky
method to allow for incremental training. Really needs to be refactored into a cleaner form.
"""
if hasattr(x_train, 'iloc'):
x_fold_train, x_fold_test = x_train.iloc[train_index], x_train.iloc[test_index]
else:
x_fold_train, x_fold_test = x_train[train_index], x_train[test_index]
if hasattr(y_train, 'iloc'):
y_fold_train, y_fold_test = y_train.iloc[train_index], y_train.iloc[test_index]
else:
y_fold_train, y_fold_test = y_train[train_index], y_train[test_index]
if fit_increment is not None:
if max_iters is not None:
for iter in range(max_iters):
x_fold_train, y_fold_train = shuffle(x_fold_train, y_fold_train, random_state=random_state)
batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose)
else:
batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose)
else:
if transformer is not None:
x_fold_train = transformer.transform(x_fold_train)
estimator.fit(x_fold_train, y_fold_train)
y_fold_test_predict = batch_predict(estimator, x_fold_test, transformer=transformer, verbose=False)
fold_predict_frame = EvaluationFrame(y_fold_test, y_fold_test_predict)
fold_predict_proba_frame = None
if record_predict_proba:
y_fold_test_predict_proba = batch_predict_proba(estimator, x_fold_test, transformer=transformer, verbose=False)
fold_predict_proba_frame = EvaluationFrame(y_fold_test, y_fold_test_predict_proba)
return Evaluator.evaluate_classifier_fold(fold_predict_frame, fold_predict_proba_frame)
class Runner:
"""
    The runner supports bare estimator fitting and search-based fitting. By default it will make use of a
    BayesSearchCV to perform hyperparameter tuning. Ensures everything is cleanly logged, evaluated, and pickled.
"""
def __init__(
self,
name,
df,
target,
estimator,
hyper_parameters=None):
self.name = name
self.df = df
self.target = target
self.estimator = estimator
self.hyper_parameters = hyper_parameters
self.trained_estimator = None
def run_classification_experiment(
self,
sample=None,
random_state=None,
test_size=0.20,
multiclass=False,
record_predict_proba=False,
sampling=None,
cv=5,
verbose=True,
transformer=None,
fit_increment=None,
warm_start=False,
max_iters=None,
n_jobs=-1):
use_project_path()
logger = Logger('%s.txt' % self.name)
evaluator = Evaluator(logger)
data_frame = self.df
if sample is not None:
data_frame = data_frame.sample(n=sample, random_state=random_state)
x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size)
if transformer is not None:
logger.time_log('Fitting Transformer...')
transformer.fit(x_train)
logger.time_log('Transformer Fit Complete.\n')
if sampling is not None:
logger.time_log('Starting Data Re-Sampling...')
logger.log('Original Training Shape is %s' % Counter(y_train))
x_new, y_new = sampling.fit_resample(x_train, y_train)
logger.log('Balanced Training Shape is %s' % Counter(y_new))
if hasattr(x_train, 'columns'):
x_new = pd.DataFrame(x_new, columns=x_train.columns)
x_train, y_train = x_new, y_new
logger.time_log('Re-Sampling Complete.\n')
logger.time_log('Shuffling Re-Sampled Data.\n')
x_train, y_train = shuffle(x_train, y_train, random_state=random_state)
logger.time_log('Shuffling Complete.\n')
if self.hyper_parameters is not None:
self.estimator.set_params(**self.hyper_parameters.params)
if cv is not None:
kfold = StratifiedKFold(n_splits=cv, random_state=random_state)
logger.time_log('Cross Validating Model...')
fold_scores = Parallel(n_jobs=n_jobs, verbose=3)(
delayed(crossfold_classifier)(
clone(self.estimator),
transformer,
x_train, y_train,
train_index, test_index,
record_predict_proba, verbose,
fit_increment, warm_start, max_iters, random_state
)
for train_index, test_index in kfold.split(x_train, y_train)
)
logger.time_log('Cross Validation Complete.\n')
logger.time_log('Training Model...')
if fit_increment is not None:
if max_iters is not None:
for iter in range(max_iters):
x_iter_train, y_iter_train = shuffle(x_train, y_train, random_state=random_state)
batch_fit_classifier(self.estimator, x_iter_train, y_iter_train, transformer=transformer, increment=fit_increment, verbose=verbose)
else:
batch_fit_classifier(self.estimator, x_train, y_train, transformer=transformer, increment=fit_increment, verbose=verbose)
else:
if transformer is not None:
x_train_transformed = transformer.transform(x_train)
self.estimator.fit(x_train_transformed, y_train)
else:
self.estimator.fit(x_train, y_train)
logger.time_log('Training Complete.\n')
logger.time_log('Testing Training Partition...')
y_train_predict = batch_predict(self.estimator, x_train, transformer=transformer, verbose=verbose)
logger.time_log('Testing Complete.\n')
train_evaluation_frame = EvaluationFrame(y_train, y_train_predict)
logger.time_log('Testing Holdout Partition...')
y_test_predict = batch_predict(self.estimator, x_test, transformer=transformer, verbose=verbose)
logger.time_log('Testing Complete.\n')
test_evaluation_frame = EvaluationFrame(y_test, y_test_predict)
test_evaluation_frame.save('%s_predict.p' % self.name)
test_proba_evaluation_frame = None
if record_predict_proba:
logger.time_log('Testing Holdout Partition (probability)...')
y_test_predict_proba = batch_predict_proba(self.estimator, x_test, transformer=transformer, verbose=verbose)
test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba)
test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name)
logger.time_log('Testing Complete.\n')
if cv is not None:
evaluator.evaluate_fold_scores(fold_scores)
evaluator.evaluate_classifier_result(
self.estimator,
test_evaluation_frame,
train=train_evaluation_frame,
test_proba=test_proba_evaluation_frame,
multiclass=multiclass
)
logger.close()
if self.hyper_parameters is not None:
self.hyper_parameters.save('%s_params.p' % self.name)
self.trained_estimator = self.estimator
def run_classification_search_experiment(
self,
scoring,
sample=None,
random_state=None,
test_size=0.20,
n_jobs=-1,
n_iter=2,
cv=5,
verbose=3,
multiclass=False,
record_predict_proba=False,
sampling=None):
use_project_path()
logger = Logger('%s.txt' % self.name)
search = BayesSearchCV(
self.estimator,
self.hyper_parameters.search_space,
n_jobs=n_jobs,
n_iter=n_iter,
cv=cv,
verbose=verbose,
scoring=scoring,
return_train_score=True
)
data_frame = self.df
if sample is not None:
data_frame = data_frame.sample(n=sample, random_state=random_state)
x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size)
if sampling is not None:
logger.time_log('Starting Data Re-Sampling...')
logger.log('Original Training Shape is %s' % Counter(y_train))
x_new, y_new = sampling.fit_resample(x_train, y_train)
logger.log('Balanced Training Shape is %s' % Counter(y_new))
if hasattr(x_train, 'columns'):
x_new = pd.DataFrame(x_new, columns=x_train.columns)
x_train, y_train = x_new, y_new
logger.time_log('Re-Sampling Complete.\n')
logger.time_log('Shuffling Re-Sampled Data.\n')
x_train, y_train = shuffle(x_train, y_train, random_state=random_state)
logger.time_log('Shuffling Complete.\n')
logger.time_log('Starting HyperParameter Search...')
results = search.fit(x_train, y_train)
logger.time_log('Search Complete.\n')
logger.time_log('Testing Training Partition...')
y_train_predict = batch_predict(results.best_estimator_, x_train)
logger.time_log('Testing Complete.\n')
train_evaluation_frame = EvaluationFrame(y_train, y_train_predict)
logger.time_log('Testing Holdout Partition...')
y_test_predict = batch_predict(results.best_estimator_, x_test)
logger.time_log('Testing Complete.\n')
test_evaluation_frame = EvaluationFrame(y_test, y_test_predict)
test_evaluation_frame.save('%s_predict.p' % self.name)
test_proba_evaluation_frame = None
if record_predict_proba:
logger.time_log('Testing Holdout Partition (probability)...')
y_test_predict_proba = batch_predict_proba(results.best_estimator_, x_test)
test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba)
test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name)
logger.time_log('Testing Complete.\n')
evaluator = Evaluator(logger)
evaluator.evaluate_classifier_result(
results,
test_evaluation_frame,
train=train_evaluation_frame,
test_proba=test_proba_evaluation_frame,
multiclass=multiclass
)
logger.close()
self.hyper_parameters.params = results.best_params_
self.hyper_parameters.save('%s_params.p' % self.name)
self.trained_estimator = results.best_estimator_
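# Example usage (a sketch only -- the experiment name, target column, estimator and hyper-parameter object below are hypothetical):
#   runner = Runner('my_experiment', df, 'target_column', RandomForestClassifier(), hyper_parameters=params)
#   runner.run_classification_experiment(cv=5, record_predict_proba=True)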
| theBraindonor/chicago-crime-arrests | utility/runner.py | runner.py | py | 11,885 | python | en | code | 1 | github-code | 13 |
32799544131 | import queue
import re
import jsonpickle
from python.network.msg import MsgUtils
from python.network.threads.PoliteThread import PoliteThread
from python.emulator.MonitoringUtils import MonitoringRequest
from python.request.qos.QoSMsg import QoSRequest
# MsgDispatcher
# Thread receiving incoming messages from the network; it decodes them and passes them to the corresponding handler
from python.utils.JsonpickleUtils import JsonEnumHandler
class MsgDispatcher(PoliteThread):
MSG_QUEUE_LIMIT = 256
INIT_QOS_REQ = "initialQoSRequest"
TOGGLE_MONITORING = "toggleMonitoring"
def __init__(self):
super().__init__()
self.queue_in = queue.Queue(self.MSG_QUEUE_LIMIT)
self.qos_handler = None
def run(self):
while self.must_run:
raw_msg = self.queue_in.get(True)
decoded_msg = jsonpickle.decode(raw_msg)
# Find the first json key, which should be the first word between "" symbols
pattern = "\"(.*?)\""
first_key = re.search(pattern, raw_msg).group(1)
# According to the first msg key, create the corresponding msg instance and dispatch it
if first_key is not None:
if first_key == self.INIT_QOS_REQ:
qos_msg = QoSRequest(decoded_msg)
self.qos_handler.handle_vapp_qos_request(qos_msg, MsgUtils.ContentType.TYPE_INIT_REQUEST)
                elif first_key == self.TOGGLE_MONITORING:
monitor_msg = MonitoringRequest(decoded_msg)
self.qos_handler.handle_vapp_monitoring_request(monitor_msg,
MsgUtils.ContentType.TYPE_START_MONITORING)
else:
print("Unknown or absent json key, cannot read this msg")
def add_msg(self, msg):
self.queue_in.put(msg)
def prepare_handlers(self, qh):
self.qos_handler = qh
jsonpickle.handlers.registry.register(MsgUtils.MsgType, JsonEnumHandler)
jsonpickle.handlers.registry.register(MsgUtils.ContentType, JsonEnumHandler)
jsonpickle.handlers.registry.register(MsgUtils.AnswerStatus, JsonEnumHandler)
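# Typical usage (a sketch; assumes PoliteThread exposes the standard threading start(), and qos_handler implements
# handle_vapp_qos_request / handle_vapp_monitoring_request):
#   dispatcher = MsgDispatcher()
#   dispatcher.prepare_handlers(qos_handler)
#   dispatcher.start()            # begins consuming messages from queue_in
#   dispatcher.add_msg(raw_json)  # enqueue an incoming network message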
| EVOLVED-5G/ImmersionNetApp | src/python/network/msg/MsgDispatcher.py | MsgDispatcher.py | py | 2,258 | python | en | code | 0 | github-code | 13 |
36947505306 | """
Script to reproduce the few-shot classification results on Meta-Dataset in:
"Fast and Flexible Multi-Task Classification Using Conditional Neural Adaptive Processes"
https://arxiv.org/pdf/1906.07697.pdf
The following command lines should reproduce the published results within error-bars:
Note before running any of the commands, you need to run the following two commands:
ulimit -n 50000
export META_DATASET_ROOT=<root directory of the cloned or downloaded Meta-Dataset repository>
CNAPs using auto-regressive FiLM adaptation, meta-training on all datasets
--------------------------------------------------------------------------
python run_cnaps.py --data_path <path to directory containing Meta-Dataset records>
CNAPs using FiLM adaptation only, meta-training on all datasets
---------------------------------------------------------------
python run_cnaps.py --feature_adaptation film --data_path <path to directory containing Meta-Dataset records>
CNAPs using no feature adaptation, meta-training on all datasets
----------------------------------------------------------------
python run_cnaps.py --feature_adaptation no_adaptation --data_path <path to directory containing Meta-Dataset records>
CNAPs using FiLM adaptation and TaskNorm, meta-training on all datasets
-----------------------------------------------------------------------
python run_cnaps.py --feature_adaptation film -i 40000 -lr 0.001 --batch_normalization task_norm-i
--data_path <path to directory containing Meta-Dataset records>
- Note that when using Meta-Dataset and auto-regressive FiLM adaptation or FiLM adaptation with TaskNorm
batch normalization, 2 GPUs with at least 16GB of memory are required.
- The other modes require only a single GPU with at least 16 GB of memory.
- If you want to run any of the modes on a single GPU, you can train on a single dataset with fixed shot and way, an
example command line is (though this will not reproduce the meta-dataset results):
python run_cnaps.py --feature_adaptation film -i 20000 -lr 0.001 --batch_normalization task_norm-i
--dataset omniglot --way 5 --shot 5 --data_path <path to directory containing Meta-Dataset records>
"""
import torch
import numpy as np
import argparse
import os
import pickle
from normalization_layers import TaskNormI
from utils import print_and_log, get_log_files, ValidationAccuracies, loss, aggregate_accuracy, verify_checkpoint_dir
from model import Cnaps
from meta_dataset_reader import MetaDatasetReader, SingleDatasetReader
NUM_VALIDATION_TASKS = 200
NUM_TEST_TASKS = 600
PRINT_FREQUENCY = 1000
def main():
learner = Learner()
learner.run()
class Learner:
def __init__(self):
self.args = self.parse_command_line()
self.checkpoint_dir, self.logfile, self.checkpoint_path_validation, self.checkpoint_path_final \
= get_log_files(self.args.checkpoint_dir, self.args.resume_from_checkpoint, self.args.mode == "test")
print_and_log(self.logfile, "Options: %s\n" % self.args)
print_and_log(self.logfile, "Checkpoint Directory: %s\n" % self.checkpoint_dir)
gpu_device = 'cuda:0'
self.device = torch.device(gpu_device if torch.cuda.is_available() else 'cpu')
self.model = self.init_model()
self.train_set, self.validation_set, self.test_set = self.init_data()
if self.args.dataset == "meta-dataset":
self.dataset = MetaDatasetReader(self.args.data_path, self.args.mode, self.train_set, self.validation_set,
self.test_set, self.args.max_way_train, self.args.max_way_test,
self.args.max_support_train, self.args.max_support_test)
else:
self.dataset = SingleDatasetReader(self.args.data_path, self.args.mode, self.args.dataset, self.args.way,
self.args.shot, self.args.query_train, self.args.query_test)
self.loss = loss
self.accuracy_fn = aggregate_accuracy
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
self.validation_accuracies = ValidationAccuracies(self.validation_set)
self.start_iteration = 0
if self.args.resume_from_checkpoint:
self.load_checkpoint()
self.optimizer.zero_grad()
def init_model(self):
use_two_gpus = self.use_two_gpus()
model = Cnaps(device=self.device, use_two_gpus=use_two_gpus, args=self.args).to(self.device)
self.register_extra_parameters(model)
# set encoder is always in train mode (it only sees context data).
model.train()
# Feature extractor is in eval mode by default, but gets switched in model depending on args.batch_normalization
model.feature_extractor.eval()
if use_two_gpus:
model.distribute_model()
return model
def init_data(self):
if self.args.dataset == "meta-dataset":
train_set = ['ilsvrc_2012', 'omniglot', 'aircraft', 'cu_birds', 'dtd', 'quickdraw', 'fungi', 'vgg_flower']
validation_set = ['ilsvrc_2012', 'omniglot', 'aircraft', 'cu_birds', 'dtd', 'quickdraw', 'fungi', 'vgg_flower',
'mscoco']
test_set = self.args.test_datasets
else:
train_set = [self.args.dataset]
validation_set = [self.args.dataset]
test_set = [self.args.dataset]
return train_set, validation_set, test_set
"""
Command line parser
"""
def parse_command_line(self):
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", choices=["meta-dataset", "ilsvrc_2012", "omniglot", "aircraft", "cu_birds",
"dtd", "quickdraw", "fungi", "vgg_flower", "traffic_sign", "mscoco",
"mnist", "cifar10", "cifar100"], default="meta-dataset",
help="Dataset to use.")
parser.add_argument('--test_datasets', nargs='+', help='Datasets to use for testing',
default=["ilsvrc_2012", "omniglot", "aircraft", "cu_birds", "dtd", "quickdraw", "fungi",
"vgg_flower", "traffic_sign", "mscoco", "mnist", "cifar10", "cifar100"])
parser.add_argument("--data_path", default="../datasets", help="Path to dataset records.")
parser.add_argument("--pretrained_resnet_path", default="../models/pretrained_resnet.pt.tar",
help="Path to pretrained feature extractor model.")
parser.add_argument("--mode", choices=["train", "test", "train_test"], default="train_test",
help="Whether to run training only, testing only, or both training and testing.")
parser.add_argument("--learning_rate", "-lr", type=float, default=5e-4, help="Learning rate.")
parser.add_argument("--tasks_per_batch", type=int, default=16,
help="Number of tasks between parameter optimizations.")
parser.add_argument("--checkpoint_dir", "-c", default='../checkpoints', help="Directory to save checkpoint to.")
parser.add_argument("--test_model_path", "-m", default=None, help="Path to model to load and test.")
parser.add_argument("--feature_adaptation", choices=["no_adaptation", "film", "film+ar"], default="film",
help="Method to adapt feature extractor parameters.")
parser.add_argument("--batch_normalization", choices=["basic", "task_norm-i"],
default="basic", help="Normalization layer to use.")
parser.add_argument("--training_iterations", "-i", type=int, default=110000,
help="Number of meta-training iterations.")
parser.add_argument("--val_freq", type=int, default=10000, help="Number of iterations between validations.")
parser.add_argument("--max_way_train", type=int, default=40,
help="Maximum way of meta-dataset meta-train task.")
parser.add_argument("--max_way_test", type=int, default=50, help="Maximum way of meta-dataset meta-test task.")
parser.add_argument("--max_support_train", type=int, default=400,
help="Maximum support set size of meta-dataset meta-train task.")
parser.add_argument("--max_support_test", type=int, default=500,
help="Maximum support set size of meta-dataset meta-test task.")
parser.add_argument("--resume_from_checkpoint", "-r", dest="resume_from_checkpoint", default=False,
action="store_true", help="Restart from latest checkpoint.")
parser.add_argument("--way", type=int, default=5, help="Way of single dataset task.")
parser.add_argument("--shot", type=int, default=1, help="Shots per class for context of single dataset task.")
parser.add_argument("--query_train", type=int, default=10,
help="Shots per class for target of single dataset task.")
parser.add_argument("--query_test", type=int, default=10,
help="Shots per class for target of single dataset task.")
args = parser.parse_args()
return args
def run(self):
if self.args.mode == 'train' or self.args.mode == 'train_test':
train_accuracies = []
losses = []
total_iterations = self.args.training_iterations
for iteration in range(self.start_iteration, total_iterations):
torch.set_grad_enabled(True)
task_dict = self.dataset.get_train_task()
task_loss, task_accuracy = self.train_task(task_dict)
train_accuracies.append(task_accuracy)
losses.append(task_loss)
# optimize
if ((iteration + 1) % self.args.tasks_per_batch == 0) or (iteration == (total_iterations - 1)):
self.optimizer.step()
self.optimizer.zero_grad()
if (iteration + 1) % PRINT_FREQUENCY == 0:
# print training stats
print_and_log(self.logfile,'Task [{}/{}], Train Loss: {:.7f}, Train Accuracy: {:.7f}'
.format(iteration + 1, total_iterations, torch.Tensor(losses).mean().item(),
torch.Tensor(train_accuracies).mean().item()))
train_accuracies = []
losses = []
if ((iteration + 1) % self.args.val_freq == 0) and (iteration + 1) != total_iterations:
# validate
accuracy_dict = self.validate()
self.validation_accuracies.print(self.logfile, accuracy_dict)
# save the model if validation is the best so far
if self.validation_accuracies.is_better(accuracy_dict):
self.validation_accuracies.replace(accuracy_dict)
torch.save(self.model.state_dict(), self.checkpoint_path_validation)
print_and_log(self.logfile, 'Best validation model was updated.')
print_and_log(self.logfile, '')
self.save_checkpoint(iteration + 1)
# save the final model
torch.save(self.model.state_dict(), self.checkpoint_path_final)
if self.args.mode == 'train_test':
self.test(self.checkpoint_path_final)
self.test(self.checkpoint_path_validation)
if self.args.mode == 'test':
self.test(self.args.test_model_path)
self.logfile.close()
def train_task(self, task_dict):
context_images, target_images, context_labels, target_labels = self.prepare_task(task_dict)
target_logits = self.model(context_images, context_labels, target_images)
task_loss = self.loss(target_logits, target_labels, self.device) / self.args.tasks_per_batch
if self.args.feature_adaptation == 'film' or self.args.feature_adaptation == 'film+ar':
if self.use_two_gpus():
regularization_term = (self.model.feature_adaptation_network.regularization_term()).cuda(0)
else:
regularization_term = (self.model.feature_adaptation_network.regularization_term())
regularizer_scaling = 0.001
task_loss += regularizer_scaling * regularization_term
task_accuracy = self.accuracy_fn(target_logits, target_labels)
task_loss.backward(retain_graph=False)
return task_loss, task_accuracy
def validate(self):
with torch.no_grad():
accuracy_dict ={}
for item in self.validation_set:
accuracies = []
for _ in range(NUM_VALIDATION_TASKS):
task_dict = self.dataset.get_validation_task(item)
context_images, target_images, context_labels, target_labels = self.prepare_task(task_dict)
target_logits = self.model(context_images, context_labels, target_images)
accuracy = self.accuracy_fn(target_logits, target_labels)
accuracies.append(accuracy.item())
del target_logits
accuracy = np.array(accuracies).mean() * 100.0
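                # 196.0 = 1.96 * 100: the 95% confidence half-width of the mean, scaled to a percentage like the accuracy above.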
confidence = (196.0 * np.array(accuracies).std()) / np.sqrt(len(accuracies))
accuracy_dict[item] = {"accuracy": accuracy, "confidence": confidence}
return accuracy_dict
def test(self, path):
print_and_log(self.logfile, "") # add a blank line
print_and_log(self.logfile, 'Testing model {0:}: '.format(path))
self.model = self.init_model()
self.model.load_state_dict(torch.load(path))
with torch.no_grad():
for item in self.test_set:
accuracies = []
for _ in range(NUM_TEST_TASKS):
task_dict = self.dataset.get_test_task(item)
context_images, target_images, context_labels, target_labels = self.prepare_task(task_dict)
target_logits = self.model(context_images, context_labels, target_images)
accuracy = self.accuracy_fn(target_logits, target_labels)
accuracies.append(accuracy.item())
del target_logits
accuracy = np.array(accuracies).mean() * 100.0
accuracy_confidence = (196.0 * np.array(accuracies).std()) / np.sqrt(len(accuracies))
print_and_log(self.logfile, '{0:}: {1:3.1f}+/-{2:2.1f}'.format(item, accuracy, accuracy_confidence))
def prepare_task(self, task_dict):
context_images_np, context_labels_np = task_dict['context_images'], task_dict['context_labels']
target_images_np, target_labels_np = task_dict['target_images'], task_dict['target_labels']
context_images_np = context_images_np.transpose([0, 3, 1, 2])
context_images_np, context_labels_np = self.shuffle(context_images_np, context_labels_np)
context_images = torch.from_numpy(context_images_np)
context_labels = torch.from_numpy(context_labels_np)
target_images_np = target_images_np.transpose([0, 3, 1, 2])
target_images_np, target_labels_np = self.shuffle(target_images_np, target_labels_np)
target_images = torch.from_numpy(target_images_np)
target_labels = torch.from_numpy(target_labels_np)
context_images = context_images.to(self.device)
target_images = target_images.to(self.device)
context_labels = context_labels.to(self.device)
target_labels = target_labels.type(torch.LongTensor).to(self.device)
return context_images, target_images, context_labels, target_labels
def shuffle(self, images, labels):
"""
Return shuffled data.
"""
permutation = np.random.permutation(images.shape[0])
return images[permutation], labels[permutation]
def use_two_gpus(self):
use_two_gpus = False
if self.args.dataset == "meta-dataset":
if self.args.feature_adaptation == "film+ar" or \
self.args.batch_normalization == "task_norm-i":
use_two_gpus = True # These models do not fit on one GPU, so use model parallelism.
return use_two_gpus
def save_checkpoint(self, iteration):
torch.save({
'iteration': iteration,
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'best_accuracy': self.validation_accuracies.get_current_best_accuracy_dict(),
}, os.path.join(self.checkpoint_dir, 'checkpoint.pt'))
def load_checkpoint(self):
checkpoint = torch.load(os.path.join(self.checkpoint_dir, 'checkpoint.pt'))
self.start_iteration = checkpoint['iteration']
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.validation_accuracies.replace(checkpoint['best_accuracy'])
def register_extra_parameters(self, model):
for module in model.modules():
if isinstance(module, TaskNormI):
module.register_extra_weights()
if __name__ == "__main__":
main()
| cambridge-mlg/cnaps | src/run_cnaps.py | run_cnaps.py | py | 17,522 | python | en | code | 155 | github-code | 13 |
42798403165 | from ghidra.program.model.block import BasicBlockModel
from ghidra.app.decompiler import *
from ghidra.framework.plugintool.util import OptionsService
def dumpFuncs(outPath):
# Set image base to 0
curImageBase = currentProgram.getImageBase()
currentProgram.setImageBase(curImageBase.subtract(curImageBase.getOffset()), False)
funcs = []
functionManager = currentProgram.getFunctionManager()
for func in functionManager.getFunctions(True):
if func.isExternal() or func.isThunk():
continue
entry = func.getEntryPoint().getOffset()
funcs.append(entry)
with open(outPath, 'w') as f:
for entry in funcs:
f.write('%x\n' % entry)
if __name__ == '__main__':
outPath = getScriptArgs()[0]
dumpFuncs(outPath)
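# Typical (illustrative) headless invocation:
#   analyzeHeadless <project_dir> <project_name> -import <binary> -postScript ghidra_script.py <output_path>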
| B2R2-org/FunProbe | tools/ghidra/scripts/ghidra_script.py | ghidra_script.py | py | 749 | python | en | code | 3 | github-code | 13 |
14274302116 | #python
# File: mc_lxRename_removeX.py
# Author: Matt Cox
# Description: Bulk renames a selection of items, removing X amount of characters from the start or the end. Based upon the user variable removeX.
import lx
import re
import sys
lxRRemoveXString = lx.eval( "user.value mcRename.removeX ?" )
lxRRemoveXArgs = lx.args()
lxRRemoveXArg = lxRRemoveXArgs[0]
if lxRRemoveXString < 0:
lxRRemoveXString = 0
try:
lxRSelectedItems = lx.evalN('query sceneservice selection ? all')
for x in lxRSelectedItems:
lx.eval('select.Item %s' %str(x))
lxRMeshNameM = lx.eval('query sceneservice item.name ? %s' %str(x))
try:
if lxRRemoveXArg == "start":
lxRNewNameM = lxRMeshNameM[lxRRemoveXString:]
else:
lxRNewNameM = lxRMeshNameM[:-lxRRemoveXString]
lx.eval('item.name "%s"'%(lxRNewNameM))
except:
lx.eval('dialog.setup error')
lx.eval('dialog.title {Error}')
lx.eval('dialog.msg {Unable to rename items.}')
lx.eval('dialog.open')
lx.eval('select.drop item')
for x in lxRSelectedItems:
lx.eval('select.Item %s add' %str(x))
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.exc_traceback.tb_lineno)) | Tilapiatsu/modo-tila_customconfig | mc_lxRename/Scripts/mc_lxRename_removeX.py | mc_lxRename_removeX.py | py | 1,289 | python | en | code | 2 | github-code | 13 |
15963361372 |
class Solution(object):
def __init__(self):
self.diagonal1 = [[0,0],[1,1],[2,2]]
self.diagonal2 = [[0,2],[1,1],[2,0]]
def checkA(self, a_list):
columns = []
rows = []
three_check = []
if (all(x in a_list for x in self.diagonal1)):
return 'A wins'
if (all(x in a_list for x in self.diagonal2)):
return 'A wins'
for x in a_list:
columns.append(x[0])
rows.append(x[1])
three_check.append(columns.count(0))
three_check.append(columns.count(1))
three_check.append(columns.count(2))
three_check.append(rows.count(0))
three_check.append(rows.count(1))
three_check.append(rows.count(2))
for x in three_check:
if x == 3:
return 'A wins'
return ''
def tictactoe(self, moves):
a_list = []
b_list = []
count = 0
for move in moves:
if count % 2 == 0:
a_list.append(move)
else:
b_list.append(move)
count += 1
outcomeA = self.checkA(a_list)
outcomeB = self.checkB(b_list)
final = outcomeA + outcomeB
if final == '':
if len(moves) < 9:
return "Pending"
else:
return 'Draw'
return final
def checkB(self, b_list):
columns = []
rows = []
three_check = []
if (all(x in b_list for x in self.diagonal1)):
return 'B wins'
if (all(x in b_list for x in self.diagonal2)):
return 'B wins'
for x in b_list:
columns.append(x[0])
rows.append(x[1])
three_check.append(columns.count(0))
three_check.append(columns.count(1))
three_check.append(columns.count(2))
three_check.append(rows.count(0))
three_check.append(rows.count(1))
three_check.append(rows.count(2))
for x in three_check:
if x == 3:
return 'B wins'
return ''
game = Solution()
output = game.tictactoe([[0,0],[2,0],[1,1],[2,1],[2,2]])
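# With these moves A plays (0,0), (1,1), (2,2) - the main diagonal - so this prints "A wins".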
print(output) | Dan298/LeetCode | TicTacToe.py | TicTacToe.py | py | 2,293 | python | en | code | 0 | github-code | 13 |
23636962778 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 12:10:28 2018
@author: Karthikeyan
"""
#load Data
import pandas as pd
import numpy as np
import os
os.getcwd()
CODELOC = "F:\\Chat_bot\\NLPBot\\"
sentence = pd.read_csv('sentences.csv')
sentence.head(10)
sentence.shape
##feature engineering
#Extracting some parts of POS sequence
import nltk
from nltk import word_tokenize
list_of_triple_string = []
sentence = "Can a dog see in colour?"
sentenceparse = word_tokenize(sentence)
pos_tag = nltk.pos_tag(sentenceparse)
pos = [i[1] for i in pos_tag]
print("word mapped to part of speech tag: ", pos_tag)
print("pos_tag: ", pos)
n = len(pos)
for i in range(0, n-3):
t = ".".join(pos[i:i+3])
list_of_triple_string.append(t)
print("Sequence of triples", list_of_triple_string)
#Extracting features
import sys
sys.path.append(CODELOC)
import features
sentence = "Can a dog see in colour?"
sentence = features.strip_sentence(sentence)
print(sentence)
pos = features.get_pos(sentence)
triples = features.get_triples(pos)
print(triples)
#Dictionary of features
sentence = ["Sorry, I don't know about the weather.",
"That is a tricky question to answer.",
"What does OCM stand for",
"MAX is a Mobile Application Accelerator",
"Can a dog see in colour?",
"how are you"
]
id = 1
for s in sentence:
features_dict = features.features_dict(str(id), s)
features_string,header = features.get_string(str(1), s)
print(features_dict)
id += 1
#Building a machine learning model
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
df = pd.read_csv("featuresDump.csv")
print(str(len(df)), "rows loaded")
#Strip any leading space from col names
df.columns = df.columns[:].str.strip()
df['class'] = df['class'].map(lambda x:x.strip())
width = df.shape[1]
#Test-Train split
np.random.seed(seed = 12)
df['is_train']= np.random.uniform(0, 1, len(df)) <= .75
train, test = df[df['is_train'] == True], df[df['is_train'] == False]
print(str(len(train)), "rows split into train", str(len(test)), "rows split into test")
features = df.columns[1:width-1]
print("FEATURES = {}".format(features))
##Fit a model with training data
#fit on rf model
clf = RandomForestClassifier(n_jobs = 2, n_estimators= 100)
clf.fit(train[features], train['class'])
#Predict on test values
preds = clf.predict(test[features])
predout = pd.DataFrame({'id': test['id'], 'Prediction' : preds, 'actual' : test['class']})
##Basic validation
#Cross_check accuracy
print(pd.crosstab(test['class'], preds, rownames = ['actual'], colnames = ['preds']))
print("\n", pd.crosstab(test['class'], preds, rownames = ['actual'],
colnames = ['preds']).apply(lambda r: round(r/r.sum()*100,2),axis = 1))
from sklearn.metrics import accuracy_score
print("\n\n Accuracy_score: ", round(accuracy_score(test['class'],preds),3))
#Load sentence data & generate features
FNAME = "F:\\Chat_bot\\NLPBot\\analysis\\pythonFAQ.csv"
import csv
import hashlib
import features
fin = open(FNAME, 'rt')
reader = csv.reader(fin)
keys = ["id",
"wordCount",
"stemmedCount",
"stemmedEndNN",
"CD",
"NN",
"NNP",
"NNPS",
"NNS",
"PRP",
"VBG",
"VBZ",
"startTuple0",
"endTuple0",
"endTuple1",
"endTuple2",
"verbBeforeNoun",
"qMark",
"qVerbCombo",
"qTripleScore",
"sTripleScore",
"class"]
rows =[]
next(reader)
for line in reader:
sentence = line[0]
c = line[1]
id = hashlib.md5(str(sentence).encode('utf-8')).hexdigest()[:16]
f = features.features_dict(id, sentence, c)
row = []
for key in keys:
value = f[key]
row.append(value)
    rows.append(row)
faq = pd.DataFrame(rows, columns = keys)
fin.close()
#predict agaist FAQ test model
featuresNames = faq.columns[1:width-1]
faqPreds = clf.predict(faq[featuresNames])
#Adhoc testing
testout = {'Q': 'QUESTIONS', 'C': 'CHAT', 'S': 'STATEMENT'}
mysentence1 = 'what is your name?'
mysentence2 = 'This is house'
mysentence3 = 'Is the cat Dead'
myFeatures = features.features_dict('1', mysentence2, 'x')
values = []
for key in keys:
values.append(myFeatures[key])
s = pd.Series(values)
width = len(s)
myFeatures = s[1:width-1]
predict = clf.predict([myFeatures])
print("\n\n Prediction is: ", testout[predict[0].strip()])
| karthikbd/NLP-PreProcessing | classification.py | classification.py | py | 4,758 | python | en | code | 0 | github-code | 13 |
20682097308 | import torch
import torch.nn as nn
import gymnasium as gym
import numpy as np
import matplotlib.pyplot as plt
import os
from sklearn.linear_model import LinearRegression
# REINFORCE Policy Gradient Algorithm
# Episodes
EPISODES = 2000
# Max Steps per Episode
MAX_STEPS = 1000
# Discount Factor
GAMMA = 0.99
# Learning Rate
LR = 1e-3
# Seed
SEED = 11
# Solved Score
SOLVED_SCORE = 195
# Model path
PATH = './models/reinforce.pth'
# Environment
env = gym.make('CartPole-v1', render_mode='rgb_array')
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
# Policy Network: S -> pi(A|S)
class Policy(nn.Module):
def __init__(self, state_dim, action_dim):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(state_dim, 128),
nn.ReLU(),
nn.Linear(128, action_dim)
)
self.action_dim = action_dim
def forward(self, x):
x = torch.from_numpy(x)
x = self.layers(x)
actions = torch.softmax(x, dim=0)
action = self.get_action(actions)
log_prob = torch.log(actions)[action]
return action, log_prob
def get_action(self, actions):
return np.random.choice(self.action_dim, p=actions.detach().numpy())
def get_cumulative_rewards(rewards):
cr = [rewards[-1]]
for i in range(len(rewards)-2, -1, -1):
cr.append(rewards[i] + GAMMA * cr[-1])
cr.reverse()
return cr
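# e.g. (illustrative) rewards [1, 1, 1] with GAMMA=0.95 -> [1 + 0.95*1.95, 1 + 0.95*1, 1] = [2.8525, 1.95, 1]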
def train():
model = Policy(state_dim, action_dim)
optim = torch.optim.SGD(model.parameters(), lr=LR)
model.train()
total_rewards = []
for episode in range(EPISODES):
state = env.reset(seed=SEED)[0]
log_probs = []
rewards = []
# run trajectory through episode
for _ in range(MAX_STEPS):
action, log_prob = model(state)
next_s, r, done, _, _ = env.step(action)
log_probs.append(log_prob)
rewards.append(r)
state = next_s
if done:
break
# update policy parameters
optim.zero_grad()
cumulative_rewards = torch.tensor(get_cumulative_rewards(rewards))
log_probs = torch.stack(log_probs)
policy_loss = -(log_probs * cumulative_rewards).mean()
policy_loss.backward()
optim.step()
total_rewards.append(np.sum(rewards))
mean = np.mean(total_rewards[-100:])
if episode % 100 == 0:
print(f'EPISODE: {episode}, MEAN: {mean}')
if mean > 195:
print(f'Game Solved at Episode {episode}')
break
# plot results
plt.plot(total_rewards)
plt.xlabel('Episodes')
plt.ylabel('Reward')
plt.title('REINFORCE Policy Gradient on CartPole')
plt.xlim(right=2000)
plt.ylim(top=500)
reg = LinearRegression().fit(
np.reshape(np.arange(len(total_rewards)), (-1, 1)),
np.reshape(total_rewards, (-1, 1))
)
plt.plot(reg.predict(np.reshape(np.arange(len(total_rewards)), (-1, 1))))
plt.show()
torch.save(model.state_dict(), PATH)
def eval():
model = Policy(state_dim, action_dim)
model.eval()
if os.path.exists(PATH):
model.load_state_dict(torch.load(PATH))
eval_episodes = 30
eval_steps = 10000
total_rewards = []
for episode in range(eval_episodes):
state = env.reset(seed=SEED)[0]
rewards = []
for _ in range(eval_steps):
action = model(state)[0]
next_s, r, done, _, _ = env.step(action)
state = next_s
rewards.append(r)
if done:
break
total_rewards.append(np.sum(rewards))
print(f'EPISODE: {episode}, REWARD: {np.sum(rewards)}')
print(f'MEAN: {np.mean(total_rewards)}')
if __name__ == '__main__':
train()
eval()
| Derrc/Reinforcement-Learning | policy-based/reinforce.py | reinforce.py | py | 4,004 | python | en | code | 1 | github-code | 13 |
12392351216 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Reference https://ebisuke33.hatenablog.com/entry/abc197c
def main():
N = int(input())
array = list(map(int,input().split()))
ans = 10**9+7
if N==1:
print(array[0])
exit()
for i in range(2**(N-1)):
base = 0
or_value = array[0]
for j in range(1,N):
if (i>>(j-1)) & 1:
base ^= or_value
or_value = 0
or_value |= array[j]
else:
or_value |= array[j]
base ^= or_value
ans = min(ans,base)
print(ans)
if __name__ == '__main__':
main() | 06keito/study-atcoder | src/abc197_c.py | abc197_c.py | py | 647 | python | en | code | 0 | github-code | 13 |
72058652499 | # Plan: split the village into two parts; the houses inside each separated part must stay connected,
# while the total maintenance (road) cost is kept as small as possible.
# Function that finds the root node of x
def findParent(parent, x):
if parent[x] != x:
        # If x is not the root, recurse until the root is found
return findParent(parent, parent[x])
return parent[x]
def unionParent(parent, a, b):
a = findParent(parent, a)
b = findParent(parent, b)
if a < b:
parent[b] = a
else:
parent[a] = b
# n: number of houses, m: number of (bidirectional) roads connecting the houses
n, m = map(int, input().split())
parent = [0] * (n + 1)
for i in range(1, n+1):
parent[i] = i
edges = []
for i in range(m):
a, b, cost = map(int, input().split())
edges.append((cost, a, b))
# Sort edges by cost (ascending)
edges.sort()
# Since the village will be split into two, the most expensive MST edge can simply be dropped.
last = 0 # cost of the most expensive edge taken so far
result = 0
for edge in edges:
cost, a, b = edge
    # Skip edges whose endpoints are already connected (adding them would create a cycle)
if findParent(parent, a) != findParent(parent, b):
unionParent(parent, a, b)
result += cost
last = cost
print(result-last)
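# Example (assumed input): n=3, m=3 with roads (1,2,1), (2,3,2), (1,3,3):
# the MST costs 1 + 2 = 3; dropping its most expensive edge (2) leaves 1.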
| jaehee222/CodingTest | 1/graph/graph_5.py | graph_5.py | py | 1,193 | python | ko | code | 0 | github-code | 13 |
6683377762 | """
**********************************************************************************
This module contains all the business logic for lists services.
**********************************************************************************
"""
from uuid import UUID, uuid4
from datetime import datetime
import uuid
import flask
from pymysql.structs import DbOperationResult
from ...common import responses
from ...models import List, ListType
from . import sql_commands as sql_engine
#------------------------------------------------------
# Return all of a user's lists
#------------------------------------------------------
def getAllLists(request_args: dict) -> flask.Response:
db_result = _selectMultiple(request_args)
if db_result.successful:
return responses.get(db_result.data)
else:
return responses.badRequest(db_result.error)
#------------------------------------------------------
# If the given request_args has a url query arg, return all lists of that type
# Otherwise, return all the lists owned by the client
#------------------------------------------------------
def _selectMultiple(request_args: dict) -> DbOperationResult:
filter_type_val = request_args.get('type') or None
if not filter_type_val:
return sql_engine.selectAll()
else:
list_type = ListType(filter_type_val)
return sql_engine.selectAllOfType(list_type)
#------------------------------------------------------
# Send a response with a single list
#------------------------------------------------------
def getList(list_id: UUID) -> flask.Response:
query_result = sql_engine.selectSingle(list_id)
if not query_result.successful:
return responses.badRequest(query_result.error)
return responses.get(query_result.data)
#------------------------------------------------------
# Generate response for cloning a list
#------------------------------------------------------
def cloneListResponse(list_id: UUID, request_form: dict) -> flask.Response:
new_list = List(
id = uuid.uuid4(),
user_id = flask.g.client_id,
name = request_form.get('name') or None
)
if not new_list.name:
return responses.badRequest('Missing required request body field: name')
clone_db_result = sql_engine.clone(list_id, new_list)
if not clone_db_result.successful:
return responses.badRequest(clone_db_result.error)
db_select = sql_engine.selectSingle(new_list.id)
return responses.created(db_select.data)
#------------------------------------------------------
# Create a brand new list
#------------------------------------------------------
def createList(request_body: dict) -> flask.Response:
# need to create a new uuid for the list
new_list_id = uuid4()
return _modifyList(new_list_id, request_body)
#------------------------------------------------------
# Update an existing list or create a list with the given id
#------------------------------------------------------
def updateList(list_id: UUID, request_body: dict) -> flask.Response:
return _modifyList(list_id, request_body)
#------------------------------------------------------
# Steps to take for creating or updating a list
#------------------------------------------------------
def _modifyList(list_id: UUID, request_body: dict) -> flask.Response:
# create a new list model
new_list = dictToList(request_body)
new_list.id = list_id
# make sure the request body contained a name field
if not new_list.name:
return responses.badRequest('Missing required field: name')
# insert the record into the database
db_result = sql_engine.modify(new_list)
if not db_result.successful:
return responses.badRequest(db_result.error)
# determine the appropriate return code we need to send back
if db_result.data == 1:
response_function = responses.created
else:
response_function = responses.updated # 2 rows updated
# retrieve the list object from the database that contains all the updated values
response_data = sql_engine.selectSingle(new_list.id)
return response_function(response_data.data)
#------------------------------------------------------
# Parse the given dict into a new List model object
#------------------------------------------------------
def dictToList(dict_obj: dict) -> List:
return List(
user_id = flask.g.client_id,
name = dict_obj.get('name') or None,
created_on = datetime.now(),
type = ListType(dict_obj.get('type'))
)
#------------------------------------------------------
# Delete a list
#------------------------------------------------------
def deleteList(list_id: UUID) -> flask.Response:
db_result = sql_engine.delete(List(id=list_id))
if not db_result.successful:
return responses.badRequest(db_result.error)
# if the rows affected is not 1 the list does not exist or is not owned by the client
if db_result.data != 1:
return responses.forbidden()
return responses.deleted()
| rrickgauer/lists | src/api/api_lists/services/lists/routines.py | routines.py | py | 5,128 | python | en | code | 1 | github-code | 13 |
6523096806 | #!/usr/bin/env python3
import random
def GenRanMac():
MacList = []
for i in range(1,7):
        RanStr = "".join(random.sample("0123456789abcdef", 2))  # sample() draws two distinct hex digits
MacList.append(RanStr)
RanMac = ":".join(MacList)
return RanMac
print (GenRanMac())
| foxleoly/python3 | randomMac.py | randomMac.py | py | 236 | python | en | code | 1 | github-code | 13 |
31942754836 | from flask_json_schema import JsonSchema
schema = JsonSchema()
template_request = {
'required': ["title", "description", "severity"],
'properties': {
'title': {'type': 'string'},
'description': {'type': 'string'},
'severity': {'type': 'string'}
}
}
template_request_delete = {
'required': ["ticket_id"],
'properties': {
'ticket_id': {'type': 'integer'}
}
}
template_request_update = {
'required': ["ticket_id", "value"],
'properties': {
'ticket_id': {'type': 'integer'},
'value': {'type': 'string'}
}
} | malinowakrew/rest_api | schema/__init__.py | __init__.py | py | 589 | python | ko | code | 0 | github-code | 13 |
74007830416 | numero_casos = int(input())
divisores = 0
while numero_casos > 0:
num = int(input())
for a in range(1, num):
if num % a == 0:
divisores += a
if divisores == num:
print(num, "eh perfeito")
else:
print(num, "nao eh perfeito")
numero_casos -= 1
divisores = 0
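# Example: for input 6 the proper divisors sum to 1 + 2 + 3 = 6, so "6 eh perfeito" is printed.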
| broeringlucas/SIN-UFSC | INE5603 - POO1/Estruturas de Repetição/numero_perfeito.py | numero_perfeito.py | py | 334 | python | pt | code | 0 | github-code | 13 |
36260320592 | #
# Turn command to rotate models.
#
def turn_command(cmdname, args, session):
from .parse import float_arg, int_arg, axis_arg, parse_arguments
req_args = (('axis', axis_arg),
('angle', float_arg),)
opt_args = (('frames', int_arg),)
kw_args = ()
kw = parse_arguments(cmdname, args, session, req_args, opt_args, kw_args)
kw['session'] = session
turn(**kw)
def turn(axis, angle, frames = 1, session = None):
v = session.view
c = v.camera
cv = c.position
saxis = cv.apply_without_translation(axis) # Convert axis from camera to scene coordinates
center = v.center_of_rotation
from ..geometry.place import rotation
r = rotation(saxis, -angle, center)
if frames == 1:
c.position = r*cv
else:
def rotate(r=r,c=c):
c.position = r*c.position
from . import motion
motion.call_for_n_frames(rotate, frames, session)
| HamineOliveira/ChimeraX | src/apps/hydra/commands/turncmd.py | turncmd.py | py | 933 | python | en | code | null | github-code | 13 |
6200711062 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 5 14:16:27 2020
@author: vijetadeshpande
"""
import torch
import torch.nn as nn
import random
class Model(nn.Module):
def __init__(self, encoder, decoder):
super().__init__()
self.encoder = encoder
self.decoder = decoder
return
def forward(self, source, target, module = 'train'):
# dim
TRG_LEN, BATCH_SIZE, TRG_DIM = target.shape
# encoder pass
memory = self.encoder(source)
# decoder pass
prediction = self.decoder(target, memory, module)
return prediction
| vijetadeshpande/meta-environment | Transformer/TransformerModel.py | TransformerModel.py | py | 718 | python | en | code | 0 | github-code | 13 |
71378298258 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 2 14:25:18 2020
@author: hossein
"""
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.optimizers import Adam
import gym
from collections import deque
import numpy as np
import random as rnd
import datetime
from log_metric import ExSARSAMetric
#tf.config.set_visible_devices([], 'GPU')
REPLAY_MEMORY_SIZE = 10000
MIN_REPLAY_MEMORY_SIZE = 100
MINIBATCH_SIZE = 16
UPDATE_TARGET_AFTER = 1000
DISCOUNT = 0.95
MAX_ACTION = 35000
LOGGING = True
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
exsarsa_reward_log_dir = 'logs/gradient_tape/' + current_time + '/exsarsa_reward2'
class DQNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
"""
in each step in training phase, the agent will do an action, to do the action,
agent needs the Q_VALUES which is the QNetwork (obviousely it's maximum)
so for each action. the QNetwork.predict() will be called
On the other hand after each action the QNetwork will tries to fit it self just
by one input! There are two problem on this:
1- Fitting a NN just by one input is not a good idea. We know it is better
to fitting on a BATCH of data (A sequence of data)
2- If we do predict-fit-predict-fit we do not have consistency on predicts
So it is better to use two QNetwork:
1-model 2-target_model
and also using experience replay
The agent use for target_model for predicts and the
model we be trained on BATCH thanks to the experience reply
eperience replay is also for planning (like DYNAQ on RL specialization)
we will copy model to target_model after UPDATE_AFTER actions
"""
self.model = self.create_QNetwork()
self.target_model = self.create_QNetwork()
self.target_model.set_weights(self.model.get_weights())
self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)
self.tau = 1
self.target_update_after_counter = 0
if LOGGING:
self.exsarsa_reward_writer = tf.summary.create_file_writer(exsarsa_reward_log_dir)
self.exsarsa_reward_metric = ExSARSAMetric()
def create_QNetwork(self):
model = Sequential()
model.add(Dense(128, input_dim=self.state_size))
model.add(Activation('relu'))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(self.action_size))
model.add(Activation('linear'))
model.compile(optimizer=Adam(lr=0.001, decay=0.00001), loss="mse", metrics=['accuracy'])
return model
def get_qvalues(self, state):
return self.model.predict(state)
def softmax(self, qvalues):
preferences = qvalues / self.tau
max_preference = np.amax(qvalues, axis=1) / self.tau
reshaped_max_preference = max_preference.reshape((-1, 1))
# Compute the numerator, i.e., the exponential of the preference - the max preference.
exp_preferences = np.exp(preferences - reshaped_max_preference)
# Compute the denominator, i.e., the sum over the numerator along the actions axis.
sum_of_exp_preferences = np.sum(exp_preferences, axis=1)
reshaped_sum_of_exp_preferences = sum_of_exp_preferences.reshape((-1, 1))
action_probs = exp_preferences / reshaped_sum_of_exp_preferences
action_probs = action_probs.squeeze()
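        # e.g. (illustrative) qvalues [[1., 2., 3.]] with tau=1 -> probs ~ [0.09, 0.24, 0.67]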
return action_probs
def act(self, state):
qvalues = self.get_qvalues(state)
actions_probability = self.softmax(qvalues)
action = np.random.choice(self.action_size, p=actions_probability.squeeze())
return action
def update_replay_memory(self, transition):
self.replay_memory.append(transition)
def train(self, terminal):
if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
return
for i in range(2):
minibatch = rnd.sample(self.replay_memory, MINIBATCH_SIZE)
current_states = np.array([transition[0] for transition in minibatch])
current_qvalues_list = self.model.predict(current_states.squeeze())
next_states = np.array([transition[3] for transition in minibatch])
next_qvalues_list = self.target_model.predict(next_states.squeeze())
next_actions_prob = self.softmax(next_qvalues_list)
x_train = []
y_train = []
for index, (current_state, action, reward, next_state, done) in enumerate(minibatch):
if not done:
future_reward = np.inner(next_qvalues_list[index], next_actions_prob[index])
desired_q = reward + DISCOUNT * future_reward
else:
desired_q = reward
current_q_values = current_qvalues_list[index]
current_q_values[action] = desired_q
x_train.append(current_state)
y_train.append(current_q_values)
x_train = np.array(x_train)
y_train = np.array(y_train)
x_train = np.reshape(x_train, [len(minibatch), self.state_size])
y_train = np.reshape(y_train, [len(minibatch), self.action_size])
self.model.fit(x_train, y_train, batch_size=MINIBATCH_SIZE, verbose=0)
self.target_update_after_counter += 1
if self.target_update_after_counter > UPDATE_TARGET_AFTER and terminal:
self.target_model.set_weights(self.model.get_weights())
self.target_update_after_counter = 0
print("*Target model updated*")
def cartpole():
env = gym.make("CartPole-v0")
observation_space_size = env.observation_space.shape[0]
action_space_size = env.action_space.n
exsarsa_agent = DQNAgent(observation_space_size, action_space_size)
episode_num = 0
action_num = 0
task_done = deque(maxlen=20)
while True:
episode_num += 1
state = env.reset()
state = np.reshape(state, [1, observation_space_size])
t = 0
while True:
env.render()
action = exsarsa_agent.act(state)
state_next, reward, terminal, info = env.step(action)
reward = reward if not terminal else -reward
state_next = np.reshape(state_next, [1, observation_space_size])
transition = (state, action, reward, state_next, terminal)
exsarsa_agent.update_replay_memory(transition)
exsarsa_agent.train(terminal)
state = state_next
t += 1
action_num += 1
if sum(task_done)/(len(task_done)+1)>195:
env.close()
if terminal:
print("Episode {} finished after {} timesteps".format(episode_num, t))
task_done.append(t)
if LOGGING:
exsarsa_agent.exsarsa_reward_metric.update_state(t)
with exsarsa_agent.exsarsa_reward_writer.as_default():
tf.summary.scalar('exsarsa_reward', exsarsa_agent.exsarsa_reward_metric.result(), step=episode_num)
exsarsa_agent.exsarsa_reward_metric.reset_states()
break
def main():
cartpole()
if __name__ == "__main__":
main()
| HosseinSheikhi/Cartpole | ExpectedSARSA/expectedSARSA.py | expectedSARSA.py | py | 7,508 | python | en | code | 0 | github-code | 13 |
23836458665 | import argparse
import requests
import struct
class ParsingError(Exception): pass
class DataBlock(object):
def __init__(self, data, debug=False):
super(DataBlock, self).__init__()
self.data = data
self.pos = 0
self.debug = debug
def offset_read(self, length, offset=None):
if not offset:
offset_position = self.pos
else:
offset_position = offset
if len(self.data) < offset_position + length:
raise ParsingError('Offset+Length > len(self.data)')
if not offset:
self.pos += length
value = self.data[offset_position:offset_position + length]
self._log('Reading: {}-{} => {}'.format(hex(offset_position), hex(offset_position + length), value))
return value
def skip(self, length):
self.pos += length
def read_filename(self):
length, = struct.unpack_from('>I', self.offset_read(4))
filename = self.offset_read(2 * length).decode('utf-16be')
structure_id, = struct.unpack_from('>I', self.offset_read(4))
structure_type, = struct.unpack_from('>4s', self.offset_read(4))
structure_type = structure_type.decode()
self._log('Structure type ', structure_type)
skip = -1
while skip < 0:
if structure_type == 'bool':
skip = 1
elif structure_type == 'type' or structure_type == 'long' or structure_type == 'shor' or structure_type == 'fwsw' or structure_type == 'fwvh' or structure_type == 'icvt' or structure_type == 'lsvt' or structure_type == 'vSrn' or structure_type == 'vstl':
skip = 4
elif structure_type == 'comp' or structure_type == 'dutc' or structure_type == 'icgo' or structure_type == 'icsp' or structure_type == 'logS' or structure_type == 'lg1S' or structure_type == 'lssp' or structure_type == 'modD' or structure_type == 'moDD' or structure_type == 'phyS' or structure_type == 'ph1S':
skip = 8
elif structure_type == 'blob':
blen, = struct.unpack_from('>I', self.offset_read(4))
skip = blen
elif structure_type == 'ustr' or structure_type == 'cmmt' or structure_type == 'extn' or structure_type == 'GRP0':
blen, = struct.unpack_from('>I', self.offset_read(4))
skip = 2 * blen
elif structure_type == 'BKGD':
skip = 12
elif structure_type == 'ICVO' or structure_type == 'LSVO' or structure_type == 'dscl':
skip = 1
elif structure_type == 'Iloc' or structure_type == 'fwi0':
skip = 16
elif structure_type == 'dilc':
skip = 32
elif structure_type == 'lsvo':
skip = 76
elif structure_type == 'icvo':
pass
elif structure_type == 'info':
pass
else:
pass
if skip <= 0:
self._log('Re-reading!')
self.skip(-1 * 2 * 0x4)
filename += self.offset_read(0x2).decode('utf-16be')
structure_id, = struct.unpack_from('>I', self.offset_read(4))
structure_type, = struct.unpack_from('>4s', self.offset_read(4))
structure_type = structure_type.decode()
future_structure_type = struct.unpack_from('>4s', self.offset_read(4, offset=self.pos))
self._log('Re-read structure_id {} / structure_type {}'.format(structure_id, structure_type))
if structure_type != 'blob' and future_structure_type != 'blob':
structure_type = ''
self._log('Forcing another round!')
self.skip(skip)
self._log('Filename {}'.format(filename))
return filename
def _log(self, *args):
if self.debug:
print('[DEBUG] {}'.format(*args))
class DS_Store(DataBlock, object):
def __init__(self, data, debug=False):
super(DS_Store, self).__init__(data, debug)
self.data = data
self.root = self.__read_header()
self.offsets = self.__read_offsets()
self.toc = self.__read_TOC()
self.freeList = self.__read_freelist()
self.debug = debug
def __read_header(self):
if len(self.data) < 36:
raise ParsingError('Length of data is too short!')
magic1, magic2 = struct.unpack_from('>II', self.offset_read(2 * 4))
if not magic1 == 0x1 and not magic2 == 0x42756431:
raise ParsingError('Magic byte 1 does not match!')
offset, size, offset2 = struct.unpack_from('>III', self.offset_read(3 * 4))
self._log('Offset 1: {}'.format(offset))
self._log('Size: {}'.format(size))
self._log('Offset 2: {}'.format(offset2))
if not offset == offset2:
raise ParsingError('Offsets do not match!')
self.skip(4 * 4)
return DataBlock(self.offset_read(size, offset + 4), debug=self.debug)
def __read_offsets(self):
start_pos = self.root.pos
count, = struct.unpack_from('>I', self.root.offset_read(4))
self._log('Offset count: {}'.format(count))
self.root.skip(4)
offsets = []
for i in range(count):
address, = struct.unpack_from('>I', self.root.offset_read(4))
self._log('Offset {} is {}'.format(i, address))
if address == 0:
continue
offsets.append(address)
section_end = start_pos + (count // 256 + 1) * 256 * 4 - count * 4
self.root.skip(section_end)
self._log('Skipped {} to {}'.format(hex(self.root.pos + section_end), hex(self.root.pos)))
self._log('Offsets: {}'.format(offsets))
return offsets
def __read_TOC(self):
self._log('POS {}'.format(hex(self.root.pos)))
count, = struct.unpack_from('>I', self.root.offset_read(4))
self._log('Toc count: {}'.format(count))
toc = {}
for i in range(count):
toc_len, = struct.unpack_from('>b', self.root.offset_read(1))
toc_name, = struct.unpack_from('>{}s'.format(toc_len), self.root.offset_read(toc_len))
block_id, = struct.unpack_from('>I', self.root.offset_read(4))
toc[toc_name.decode()] = block_id
self._log('Toc {}'.format(toc))
return toc
def __read_freelist(self):
freelist = {}
for i in range(32):
freelist[2 ** i] = []
blkcount, = struct.unpack_from('>I', self.root.offset_read(4))
for j in range(blkcount):
free_offset, = struct.unpack_from('>I', self.root.offset_read(4))
freelist[2 ** i].append(free_offset)
self._log('Freelist: {}'.format(freelist))
return freelist
def __block_by_id(self, block_id):
if len(self.offsets) < block_id:
raise ParsingError('BlockID out of range!')
addr = self.offsets[block_id]
offset = (int(addr) >> 0x5 << 0x5)
size = 1 << (int(addr) & 0x1f)
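        # e.g. (illustrative) addr 0x200b -> offset 0x200b >> 5 << 5 = 0x2000, size 1 << (0x200b & 0x1f) = 2048 bytes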
self._log('New block: addr {} offset {} size {}'.format(addr, offset + 0x4, size))
return DataBlock(self.offset_read(size, offset + 0x4), debug=self.debug)
def traverse_root(self):
root = self.__block_by_id(self.toc['DSDB'])
root_id, = struct.unpack('>I', root.offset_read(4))
self._log('Root-ID ', root_id)
internal_block_count, = struct.unpack('>I', root.offset_read(4))
record_count, = struct.unpack('>I', root.offset_read(4))
block_count, = struct.unpack('>I', root.offset_read(4))
unknown, = struct.unpack('>I', root.offset_read(4))
return self.traverse(root_id)
def traverse(self, block_id):
node = self.__block_by_id(block_id)
next_pointer, = struct.unpack('>I', node.offset_read(4))
count, = struct.unpack('>I', node.offset_read(4))
self._log('Next Ptr {} with {} '.format(hex(next_pointer), hex(count)))
filenames = []
if next_pointer > 0:
for i in range(0, count, 1):
next_id, = struct.unpack('>I', node.offset_read(4))
self._log('Child: {}'.format(next_id))
files = self.traverse(next_id)
filenames += files
filename = node.read_filename()
self._log('Filename: ', filename)
filenames.append(filename)
files = self.traverse(next_pointer)
filenames += files
else:
for i in range(0, count, 1):
f = node.read_filename()
filenames.append(f)
return filenames
class Crawler:
def __init__(self, target, debug=False):
self.target = target
self.debug = debug
self._log('Crawling started')
def crawl(self, target=False):
if not target:
target = self.target
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:60.0) Gecko/20100101 Firefox/60.0'}
self._log('Crawling ' + target)
r = requests.get(target + '/.DS_Store', headers=headers)
if r.status_code == 200:
ds = DS_Store(r.content, self.debug)
files = ds.traverse_root()
for file in list(set(files)):
self.crawl(target + '/' + file)
print('\033[92m' + '[+]' + '\033[m' + ' Found ' + target + '/' + file)
def _log(self, *args):
if self.debug:
print('\033[93m' + '[DEBUG]'+'\033[m'+' {}'.format(*args))
def banner():
print('\033[92m\033[1m'+''' ___ __ ___ _
/ \/ _\ / __\ __ __ ___ _| | ___ _ __
/ /\ /\ \ / / | '__/ _` \ \ /\ / / |/ _ \ '__|
/ /_// _\ \ / /__| | | (_| |\ V V /| | __/ |
/___,' \__/ \____/_| \__,_| \_/\_/ |_|\___|_|
'''+'\033[91m'+'''v1.0 [0xdeadbeef]'''+'\033[m')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--url', required=True, help='web site that you want to crawl')
parser.add_argument('-v', '--verbose', help='To enable verbosity', action='store_true')
args = parser.parse_args()
target = args.url
verbose = args.verbose
crawler = Crawler(target, verbose)
crawler.crawl()
if __name__ == '__main__':
banner()
main()
| duty1g/DS-Crawler | DS_Crawler.py | DS_Crawler.py | py | 10,444 | python | en | code | 1 | github-code | 13 |
41433766062 | from django.shortcuts import render
from .models import Article,ArticleImage
from django.views.generic import DetailView,TemplateView
from .forms import ArticleFormSet,ArticleForm
class ArticleDetailView(DetailView):
model = Article
template_name='articles/article.html'
def get_context_data(self, **kwargs):
context = super(ArticleDetailView,self).get_context_data(**kwargs)
return context
class ArticlesView(TemplateView):
template_name = 'articles/articles_list.html'
def get_context_data(self,**kwargs):
context=super().get_context_data(**kwargs)
context['articles']=Article.objects.order_by('-date')
# context['images']=ArticleImage.objects
return context
| faci2000/political_website | articles/views.py | views.py | py | 734 | python | en | code | 0 | github-code | 13 |
1416166776 | import sys
if len(sys.argv) <= 1:
raise Exception("No inputs")
with open(sys.argv[1], 'r') as f:
lines = [l.rstrip() for l in f.readlines()]
def read_tiles(ls):
i = 0
tiles = []
while i < len(ls):
l = ls[i]
title, id = l[:-1].split(' ')
i += 1
m = []
while i < len(ls) and len(ls[i]) > 0:
l = ls[i]
m.append(list(l))
i += 1
tiles.append((int(id), m))
i += 1
return tiles
def get_corners(m):
corners = [tuple(m[0])]
corners.append(tuple(l[len(l)-1] for l in m))
corners.append(tuple(m[len(m)-1]))
corners.append(tuple(l[0] for l in m))
return corners
tiles = read_tiles(lines)
H = len(tiles[0][1])
tile_by_id = {t[0]: t[1] for t in tiles}
corners_for_tile = {t[0]: get_corners(t[1]) for t in tiles}
tile_for_corner = {}
for id in corners_for_tile:
cs = corners_for_tile[id]
for c in cs:
c_r = c[::-1]
if c not in tile_for_corner and c_r not in tile_for_corner:
tile_for_corner[c] = set([])
if c in tile_for_corner:
tile_for_corner[c].add(id)
elif c_r in tile_for_corner:
tile_for_corner[c_r].add(id)
def find_corners(ts, t_by_c):
corners = []
for tid in ts:
non_matched = []
for c in ts[tid]:
uniq = True
if c in t_by_c and len(t_by_c[c]) > 1:
uniq = False
i_c = c[::-1]
if i_c in t_by_c and len(t_by_c[i_c]) > 1:
uniq = False
if uniq:
non_matched.append(c)
if len(non_matched) == 2:
corners.append((tid, non_matched))
return corners
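# A corner tile is one with exactly two border edges that no other tile shares (in either orientation).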
def part1(c):
res = 1
for i in c:
res *= i[0]
return res
corners = find_corners(corners_for_tile, tile_for_corner)
print("Part 1: %d" % part1(corners))
c1 = corners[0]
def print_tile(mp):
for r in mp:
print("".join(r))
def rotate_map(mp):
mp2 = []
for i in range(0, len(mp)):
rw = []
for j in range(0, len(mp)):
rw.append(mp[len(mp)-j-1][i])
mp2.append(rw)
return mp2
def flip_vert_map(mp):
return mp[::-1]
def flip_hor_map(mp):
mp2 = []
for r in mp:
mp2.append(r[::-1])
return mp2
def rotate(tid, count = 1):
# print("ROTATE %d by %d" % (tid, count * 90))
for i in range(0, count):
tile_by_id[tid] = rotate_map(tile_by_id[tid])
corners_for_tile[tid] = get_corners(tile_by_id[tid])
def reverse(tid, vert):
if vert:
tile_by_id[tid] = flip_vert_map(tile_by_id[tid])
else:
tile_by_id[tid] = flip_hor_map(tile_by_id[tid])
corners_for_tile[tid] = get_corners(tile_by_id[tid])
def rotate_corner(c):
c1 = c[1][0]
c2 = c[1][1]
cs = corners_for_tile[c[0]]
c1i = cs.index(c1)
rotate(c[0], 3-c1i)
rotate_corner(c1)
def find_right_tile(tid):
r_c = corners_for_tile[tid][1]
# print("SEARCH")
r_c_i = r_c[::-1]
if r_c in tile_for_corner:
tids = tile_for_corner[r_c]
elif r_c_i in tile_for_corner:
tids = tile_for_corner[r_c_i]
else:
raise Exception("WTF???")
if len(tids) == 1:
return None
# print("FOUNDED from len %d" % (len(tids)))
r_tid = next(iter(tids.difference(set([tid]))))
# jm([[r_tid]])
r_c_i = r_c[::-1]
r_corners = corners_for_tile[r_tid]
for i in range(0, len(r_corners)):
c = r_corners[i]
if c == r_c:
if i == 0:
rotate(r_tid, 1)
reverse(r_tid, False)
elif i == 1:
reverse(r_tid, False)
elif i == 2:
rotate(r_tid, 1)
# jm([[r_tid]])
return r_tid
if c == r_c_i:
if i == 0:
rotate(r_tid, 3)
elif i == 1:
rotate(r_tid, 2)
elif i == 2:
rotate(r_tid, 1)
reverse(r_tid, True)
elif i == 3:
reverse(r_tid, True)
# jm([[r_tid]])
return r_tid
def find_bot_tile(tid):
r_c = corners_for_tile[tid][2]
# print("B SEARCH")
# print(r_c)
r_c_i = r_c[::-1]
if r_c in tile_for_corner:
tids = tile_for_corner[r_c]
elif r_c_i in tile_for_corner:
tids = tile_for_corner[r_c_i]
else:
raise Exception("WTF???")
# print(len(tids))
if len(tids) == 1:
return None
# print("FOUNDED from len %d" % (len(tids)))
r_tid = next(iter(tids.difference(set([tid]))))
# jm([[r_tid]])
r_corners = corners_for_tile[r_tid]
for i in range(0, len(r_corners)):
c = r_corners[i]
if c == r_c:
if i == 1:
rotate(r_tid, 3)
elif i == 2:
reverse(r_tid, True)
elif i == 3:
rotate(r_tid, 1)
reverse(r_tid, False)
# jm([[r_tid]])
return r_tid
if c == r_c_i:
if i == 0:
reverse(r_tid, False)
elif i == 1:
rotate(r_tid, 1)
reverse(r_tid, True)
elif i == 2:
rotate(r_tid, 2)
elif i == 3:
rotate(r_tid, 1)
# jm([[r_tid]])
return r_tid
def restore_line(s_tid):
r = [s_tid]
c_tid = s_tid
while True:
n_tid = find_right_tile(c_tid)
if n_tid is not None:
c_tid = n_tid
r.append(c_tid)
else:
return r
def jm(pt):
res = []
for r in pt:
for i in range(0, H):
l = []
for tid in r:
mp = tile_by_id[tid]
l.append("".join(mp[i]))
l.append(" ")
res.append("".join(l))
print("".join(l))
print("")
    return res
restored = []
# jm([[c1[0]]])
rl = restore_line(c1[0])
restored.append(rl)
# print(restored)
# jm(restored)
i = 0
while True:
b_t = find_bot_tile(restored[i][0])
if b_t is None:
break
restored.append(restore_line(b_t))
# print(restored)
# jm(restored)
i += 1
# print(restored)
# jm(restored)
def conc(pt):
res = []
for r in pt:
for i in range(1, H-1):
l = []
for tid in r:
mp = tile_by_id[tid]
l.append("".join(mp[i][1:-1]))
res.append("".join(l))
return res
monster = """ #
# ## ## ###
# # # # # #""".split('\n')
mp = conc(restored)
def is_monster(mp, monster, s_i, s_j):
for i in range(0, len(monster)):
for j in range(0, len(monster[i])):
if monster[i][j] == '#' and mp[s_i + i][s_j + j] != '#':
return False
return True
def count_monster(mp, monster):
result = 0
for i in range(0, len(mp)-len(monster)):
for j in range(0, len(mp[i])-len(monster[0])):
if is_monster(mp, monster, i, j):
result += 1
return result
def count_all_monster(mp, monster):
for i in range(0, 4):
c = count_monster(mp, monster)
if c > 0:
return c
c = count_monster(flip_vert_map(mp), monster)
if c > 0:
return c
c = count_monster(flip_hor_map(mp), monster)
if c > 0:
return c
mp = rotate_map(mp)
def count_non_sea(mp):
non_sea = 0
for i in range(0, len(mp)):
for j in range(0, len(mp[i])):
if mp[i][j] == '#':
non_sea += 1
return non_sea
part2 = count_non_sea(mp) - count_non_sea(monster) * count_all_monster(mp, monster)
print("Part 2: %d" % part2)
| asek-ll/aoc2020 | day20/main.py | main.py | py | 7,825 | python | en | code | 0 | github-code | 13 |
5947662768 | import nuke
import os
import logging
import json
from functools import partial
try:
if nuke.NUKE_VERSION_MAJOR < 11:
from PySide import QtCore, QtGui, QtGui as QtWidgets
from PySide.QtCore import Qt
else:
from PySide2 import QtWidgets, QtGui, QtCore
from PySide2.QtCore import Qt
except ImportError:
from Qt import QtCore, QtGui, QtWidgets
from KnobScripter import utils, snippets, widgets, config, content, dialogs, ksscripteditor
code_gallery_dict = {
"blink": [
{
"title": "Kernel skeleton",
"desc": "Basic code structure for starting a Blink kernel.",
"cat": ["Base codes"],
"code": """\nkernel KernelName : ImageComputationKernel<ePixelWise>\n{\n Image<eRead, eAccessPoint, eEdgeClamped> src;\n Image<eWrite> dst;\n\n param:\n\n\n local:\n\n\n void init() {\n\n }\n\n void process(int2 pos) {\n dst() = src();\n }\n};\n""",
"editor_height": 40,
},
{
"title": "Process function",
"desc": "Example template for the main processing function in Blink.",
"cat": ["Base codes"],
"code": """void process() {\n // Read the input image\n SampleType(src) input = src();\n\n // Isolate the RGB components\n float3 srcPixel(input.x, input.y, input.z);\n\n // Calculate luma\n float luma = srcPixel.x * coefficients.x\n + srcPixel.y * coefficients.y\n + srcPixel.z * coefficients.z;\n // Apply saturation\n float3 saturatedPixel = (srcPixel - luma) * saturation + luma;\n\n // Write the result to the output image\n dst() = float4(saturatedPixel.x, saturatedPixel.y, saturatedPixel.z, input.w);\n }"""
},
{
"title": "Longer text? what would happen exactly? lets try it like right now yes yes yes yes yes ",
"desc": "Example template for the main processing function in Blink. this is the same but with a way longer description to see what happens... lets see!!!!.",
"cat": ["Base codes", "Example"],
"code": """void process() {\n // Read the input image\n SampleType(src) input = src();\n\n // Isolate the RGB components\n float3 srcPixel(input.x, input.y, input.z);\n\n // Calculate luma\n float luma = srcPixel.x * coefficients.x\n + srcPixel.y * coefficients.y\n + srcPixel.z * coefficients.z;\n // Apply saturation\n float3 saturatedPixel = (srcPixel - luma) * saturation + luma;\n\n // Write the result to the output image\n dst() = float4(saturatedPixel.x, saturatedPixel.y, saturatedPixel.z, input.w);\n }"""
},
],
"python": [
{
"title": "print statement",
"desc": "Simple print statement...",
"cat": ["Base codes"],
"code": """print("2")""",
},
],
}
def get_categories(code_dict=None):
""" Return a list of available categories for the specified code_dict (or the default one if not specified). """
code_dict = code_dict or load_code_gallery_dict(config.codegallery_user_txt_path)
categories = []
for lang in code_dict:
for code_item in code_dict[lang]:
if "cat" in code_item.keys():
cat = code_item["cat"]
if isinstance(cat, list):
categories.extend(cat)
return list(set(categories))
def load_all_code_gallery_dicts():
""" Return a dictionary that contains the code gallery dicts from all different paths. """
# TODO This function!!!! to also include the other paths, not only the user specified...
user_dict = config.code_gallery_files
full_dict = dict()
for file in config.code_gallery_files+[config.codegallery_user_txt_path]:
file_dict = load_code_gallery_dict(file)
logging.debug(file)
for key in file_dict.keys():
if key not in full_dict.keys():
full_dict[key] = []
for single_code_dict in file_dict[key]:
full_dict[key].append(single_code_dict)
logging.debug(full_dict)
return full_dict
def load_code_gallery_dict(path=None):
'''
Load the codes from the user json path as a dict. Return dict()
'''
#return code_gallery_dict #TEMPORARY
if not path:
path = config.codegallery_user_txt_path
if not os.path.isfile(path):
logging.debug("Path doesn't exist: "+path)
return dict()
else:
try:
with open(path, "r") as f:
code_dict = json.load(f)
return code_dict
except:
logging.debug("Couldn't open file: {}.\nLoading empty dict instead.".format(path))
return dict()
def save_code_gallery_dict(code_dict, path=None):
''' Perform a json dump of the code gallery into the path. '''
if not path:
path = config.codegallery_user_txt_path
with open(path, "w") as f:
json.dump(code_dict, f, sort_keys=True, indent=4)
content.code_gallery_dict = code_dict
def append_code(code, title=None, desc=None, categories = None, path=None, lang="python"):
""" Load the codegallery file as a dict and append a code. """
if code == "":
return False
path = path or config.codegallery_user_txt_path
title = title or ""
desc = desc or ""
categories = categories or get_categories()
lang = lang.lower()
all_codes = load_code_gallery_dict(path)
if code == "":
return False
if lang not in all_codes:
all_codes[lang] = []
single_code_dict = dict()
single_code_dict["title"] = title
single_code_dict["desc"] = desc
single_code_dict["cat"] = categories
single_code_dict["code"] = code
all_codes[lang].append(single_code_dict)
save_code_gallery_dict(all_codes, path)
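# Example usage (illustrative):
#   append_code('print("hello")', title="Hello", desc="Minimal example",
#               categories=["Base codes"], lang="python")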
class AppendCodePanel(QtWidgets.QDialog):
def __init__(self, parent=None, code=None, title=None, desc=None, cat=None, lang="python", path=None):
super(AppendCodePanel, self).__init__(parent)
self.lang = lang
title = title or ""
desc = desc or ""
cat = cat or []
self.path = path or config.codegallery_user_txt_path
self.existing_code_dict = load_code_gallery_dict(self.path)
self.existing_categories = get_categories(self.existing_code_dict)
# Layout
self.layout = QtWidgets.QVBoxLayout()
# Code language
self.lang_selector = widgets.RadioSelector(["Python", "Blink", "All"])
self.lang_selector.radio_selected.connect(self.change_lang)
# Title
self.title_lineedit = QtWidgets.QLineEdit(title)
f = self.title_lineedit.font()
f.setWeight(QtGui.QFont.Bold)
self.title_lineedit.setFont(f)
# Description
self.description_lineedit = QtWidgets.QLineEdit(title)
# Category
self.category_combobox = QtWidgets.QComboBox()
self.category_combobox.setEditable(True)
self.category_combobox.setSizePolicy(QtWidgets.QSizePolicy.Expanding,QtWidgets.QSizePolicy.Expanding)
#self.category_combobox.lineEdit().setText("")
self.category_combobox.addItem("","")
for cat in self.existing_categories:
self.category_combobox.addItem(str(cat), str(cat))
# Code
self.script_editor = ksscripteditor.KSScriptEditor()
self.script_editor.setPlainText(code)
se_policy = self.script_editor.sizePolicy()
se_policy.setVerticalStretch(1)
self.script_editor.setSizePolicy(se_policy)
# Warnings
self.warnings_label = QtWidgets.QLabel("Please set a code and title.")
self.warnings_label.setStyleSheet("color: #D65; font-style: italic;")
self.warnings_label.setWordWrap(True)
self.warnings_label.mouseReleaseEvent = lambda x: self.warnings_label.hide()
# Buttons
self.button_box = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Save | QtWidgets.QDialogButtonBox.Cancel)
self.button_box.accepted.connect(self.save_pressed)
self.button_box.rejected.connect(self.cancel_pressed)
# Form layout
self.form = QtWidgets.QFormLayout()
self.form.addRow("Language: ", self.lang_selector)
self.form.addRow("Title: ", self.title_lineedit)
self.form.addRow("Description: ", self.description_lineedit)
self.form.addRow("Category: ", self.category_combobox)
self.form.addRow("Code: ", self.script_editor)
self.form.addRow("", self.warnings_label)
self.warnings_label.hide()
self.form.setFieldGrowthPolicy(QtWidgets.QFormLayout.ExpandingFieldsGrow)
self.layout.addLayout(self.form)
self.layout.addWidget(self.button_box)
self.setLayout(self.layout)
# Init values
self.setWindowTitle("Add Code to Code Gallery")
self.lang_selector.set_button(self.lang)
self.script_editor.set_code_language(self.lang)
self.title_lineedit.setFocus()
self.title_lineedit.selectAll()
def change_lang(self, lang):
self.script_editor.set_code_language(str(lang.lower()))
def save_pressed(self):
title = self.title_lineedit.text()
description = self.description_lineedit.text()
categories_str = self.category_combobox.lineEdit().text()
categories = [c.strip() for c in categories_str.split(",")]
categories = [c for c in categories if len(c)]
code = self.script_editor.toPlainText()
lang = self.lang_selector.selected_text()
if "" in [code,title]:
self.warnings_label.show()
return False
logging.debug(
"Code to be saved \nLang:\n{0}\nTitle:\n{1}\nDescription:\n{2}\nCategory:\n{3}\nCode:\n{4}\n------".format(lang, title, description, categories, code))
append_code(code, title, description, categories, lang=lang)
code_gallery_dict = load_code_gallery_dict()
try:
content.code_gallery_dict = code_gallery_dict
except Exception as e:
logging.debug(e)
self.accept()
def cancel_pressed(self):
if self.script_editor.toPlainText() != "":
msg = "Do you wish to discard the changes?"
if not dialogs.ask(msg, self, default_yes=False):
return False
self.reject()
class CodeGalleryWidget(QtWidgets.QWidget):
def __init__(self, knob_scripter="", _parent=QtWidgets.QApplication.activeWindow(), lang="python"):
super(CodeGalleryWidget, self).__init__(_parent)
self.knob_scripter = knob_scripter
self.code_language = lang
self.initUI()
self.change_lang(self.code_language)
def initUI(self):
self.layout = QtWidgets.QVBoxLayout()
# 1. Filters (language etc)
self.filter_widget = QtWidgets.QFrame()
filter_layout = QtWidgets.QHBoxLayout()
code_language_label = QtWidgets.QLabel("Language:")
filter_layout.addWidget(code_language_label)
# TODO Compatible with expressions and TCL knobs too!!
self.lang_selector = widgets.RadioSelector(["Python", "Blink", "All"])
self.lang_selector.radio_selected.connect(self.change_lang)
filter_layout.addWidget(self.lang_selector)
filter_layout.addStretch()
self.reload_button = QtWidgets.QPushButton("Reload")
self.reload_button.clicked.connect(self.reload)
filter_layout.setMargin(0)
filter_layout.addWidget(self.reload_button)
self.filter_widget.setLayout(filter_layout)
self.layout.addWidget(self.filter_widget)
self.layout.addWidget(widgets.HLine())
# 2. Scroll Area
# 2.1. Inner scroll content
self.scroll_content = QtWidgets.QWidget()
self.scroll_layout = QtWidgets.QVBoxLayout()
self.scroll_layout.setMargin(0)
self.scroll_layout.addStretch()
self.scroll_content.setLayout(self.scroll_layout)
self.scroll_content.setContentsMargins(0, 0, 8, 0)
self.change_lang(self.code_language, force_reload=True)
# 2.2. External Scroll Area
self.scroll = QtWidgets.QScrollArea()
self.scroll.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scroll.setWidgetResizable(True)
self.scroll.setWidget(self.scroll_content)
self.scroll.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
self.layout.addWidget(self.scroll)
# 3. Lower buttons
self.lower_layout = QtWidgets.QHBoxLayout()
self.add_code_btn = widgets.APToolButton("add_filled")
self.add_code_btn.setToolTip("Add new code")
self.add_code_btn.clicked.connect(self.add_code)
self.v_expand_btn = widgets.APToolButton("v_expand", icon_size=22)
self.v_expand_btn.setToolTip("Expand all codes")
self.v_expand_btn.clicked.connect(self.expand_codes)
self.v_collapse_btn = widgets.APToolButton("v_collapse", icon_size=22)
self.v_collapse_btn.setToolTip("Collapse all codes")
self.v_collapse_btn.clicked.connect(self.collapse_codes)
self.help_btn = widgets.APToolButton("help_filled")
self.help_btn.setToolTip("Help")
self.help_btn.clicked.connect(self.show_help)
self.lower_layout.addWidget(self.add_code_btn)
self.lower_layout.addSpacing(12)
self.lower_layout.addWidget(self.v_expand_btn)
self.lower_layout.addWidget(self.v_collapse_btn)
self.lower_layout.addStretch()
self.lower_layout.addWidget(self.help_btn)
self.layout.addWidget(widgets.HLine())
self.layout.addLayout(self.lower_layout)
self.setLayout(self.layout)
def reload(self):
""" Force a rebuild of the widgets in the current filter status. """
lang = self.lang_selector.selected_text()
self.change_lang(lang, force_reload=True)
def change_lang(self, lang, force_reload=False):
""" Set the code language, clear the scroll layout and rebuild it as needed. """
lang = lang.lower()
if force_reload == False and lang == self.code_language:
logging.debug("KS: Doing nothing because the language was already selected.")
return False
elif force_reload:
pass
self.lang_selector.set_button(lang)
self.code_language = lang
logging.debug("Setting code language to " + lang)
# Clear scroll area
utils.clear_layout(self.scroll_layout)
code_gallery_dict = load_all_code_gallery_dicts()
# Build widgets as needed
if lang == "all":
for lang in code_gallery_dict.keys():
tg = widgets.ToggableGroup(self)
tg.setTitle("<big><b>{}</b></big>".format(lang.capitalize()))
self.build_gallery_group(code_gallery_dict[lang], tg.content_layout, lang=lang)
self.scroll_layout.insertWidget(-1, tg)
self.scroll_layout.addSpacing(10)
elif lang in code_gallery_dict:
self.build_gallery_group(code_gallery_dict[lang], self.scroll_layout, lang=lang)
self.scroll_layout.addStretch()
def build_gallery_group(self, code_list, layout, lang="python"):
""" Given a list of code gallery items, it builds the widgets in the given layout """
# 1. Get available categories
categories = []
for code in code_list:
for cat in code["cat"]:
categories.append(cat)
categories = list(set(categories))
# 2. Build gallery items
for cat in categories:
tg = widgets.ToggableGroup(self)
tg.setTitle("<big><b>{}</b></big>".format(cat))
for code in code_list:
if cat in code["cat"]:
cgi = self.code_gallery_item(code, lang=lang)
tg.content_layout.addWidget(cgi)
layout.insertWidget(-1, tg)
layout.addSpacing(4)
def code_gallery_item(self, code, lang="python"):
""" Given a code dict, returns the corresponding code gallery widget. """
if not all(i in code for i in ["title", "code"]):
return False
cgi = CodeGalleryItem(self)
# 1. Title/description
title = "<b>{0}</b>".format(code["title"])
if "desc" in code:
title += "<br><small style='color:#999'>{}</small>".format(code["desc"])
cgi.setTitle(title)
cgi.btn_insert_code.clicked.connect(partial(self.insert_code, cgi))
cgi.btn_save_snippet.clicked.connect(partial(self.save_snippet, cgi))
# 2. Content
cgi.script_editor.set_code_language(lang.lower())
# cgi.script_editor.setFont(config.script_editor_font)
cgi.script_editor.setPlainText(code["code"])
if "editor_height" in code:
cgi.setFixedHeight(cgi.top_layout.sizeHint().height() + 40 + code["editor_height"])
else:
cgi.setFixedHeight(cgi.top_layout.sizeHint().height() + 140)
return cgi
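    # Illustrative sketch (not from the original source) of the dict shape consumed above:
    # code_gallery_item() requires "title" and "code", build_gallery_group() also reads "cat",
    # while "desc" and "editor_height" are optional extras.
    # {
    #     "title": "Deselect all nodes",
    #     "desc": "Clears the current selection.",
    #     "cat": ["Nodes"],
    #     "code": "for n in nuke.selectedNodes():\n    n.setSelected(False)",
    #     "editor_height": 40,
    # }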
def add_code(self):
""" Bring up a panel to add a new code to the Code Gallery. """
codepanel = AppendCodePanel(self, lang=self.code_language)
codepanel.show()
def insert_code(self, code_gallery_item):
""" Insert the code contained in code_gallery_item in the knobScripter's texteditmain. """
self.knob_scripter = utils.getKnobScripter(self.knob_scripter)
if self.knob_scripter:
code = code_gallery_item.script_editor.toPlainText()
self.knob_scripter.script_editor.addSnippetText(code)
def save_snippet(self, code_gallery_item, shortcode=""):
""" Save the current code as a snippet (by introducing a shortcode) """
# while...
code = code_gallery_item.script_editor.toPlainText()
lang = code_gallery_item.script_editor.code_language
asp = snippets.AppendSnippetPanel(self, code, shortcode, lang=lang)
asp.show()
def all_code_groups(self):
""" Return a list of all Code Gallery Groups. """
all_scroll_widgets = (self.scroll_layout.itemAt(i).widget() for i in range(self.scroll_layout.count()))
gallery_groups = []
for g in all_scroll_widgets:
if isinstance(g, widgets.ToggableGroup):
gallery_groups.append(g)
return gallery_groups
def all_codegallery_items(self, code_groups=None):
""" Return a list of all CodeGalleryItems. """
if not code_groups:
code_groups = self.all_code_groups()
codegallery_items = []
for g in code_groups:
all_subwidgets = (g.content_layout.itemAt(i).widget() for i in range(g.content_layout.count()))
for w in all_subwidgets:
if isinstance(w, CodeGalleryItem):
codegallery_items.append(w)
return codegallery_items
def expand_codes(self):
code_groups = self.all_code_groups()
for w in code_groups + self.all_codegallery_items(code_groups):
w.setCollapsed(False)
def collapse_codes(self):
code_groups = self.all_code_groups()
for w in code_groups + self.all_codegallery_items(code_groups):
w.setCollapsed(True)
def show_help(self):
# TODO make proper help... link to pdf or video?
nuke.message("The Code Gallery is a convenient place for code reference. It allows yourself or your studio "
"to have a gallery of useful pieces of code, categorized and accompanied by a title and short "
"description. \n\n"
"Please refer to the docs for more information.")
class CodeGalleryItem(widgets.ToggableCodeGroup):
""" widgets.ToggableGroup adapted specifically for a code gallery item. """
def __init__(self, parent=None):
super(CodeGalleryItem, self).__init__(parent=parent)
self.parent = parent
# Add buttons
btn1_text = "Insert code"
self.btn_insert_code = QtWidgets.QPushButton(btn1_text)
self.btn_insert_code.setMaximumWidth(self.btn_insert_code.fontMetrics().boundingRect(btn1_text).width() + 14)
btn2_text = "Save snippet"
self.btn_save_snippet = QtWidgets.QPushButton(btn2_text)
self.btn_save_snippet.setMaximumWidth(self.btn_save_snippet.fontMetrics().boundingRect(btn2_text).width() + 14)
self.top_right_layout.addWidget(self.btn_insert_code)
self.top_right_layout.addWidget(self.btn_save_snippet)
| adrianpueyo/KnobScripter | KnobScripter/codegallery.py | codegallery.py | py | 20,754 | python | en | code | 65 | github-code | 13 |
74894602577 | import telebot
from telebot import types
from icrawler.builtin import GoogleImageCrawler
import shutil
import random
import wikipedia
import requests
import datetime
wikipedia.set_lang('ru')
bot = telebot.TeleBot('5502613023:AAFsb-kerhTpeRSCfqh1_zRnOqPCaykbbDM')
markup = types.ReplyKeyboardMarkup()
markup.add(types.KeyboardButton('/help'))
markup.add(types.KeyboardButton("Узнать погоду"))
markup.add(types.KeyboardButton('Узнать информацию по слову из Википедии'))
markup.add(types.KeyboardButton('Подобрать обои на рабочий стол'))
markup.add(types.KeyboardButton('Узнать расписание'))
markup.add(types.KeyboardButton('Полезные ссылки для студента НГТУ'))
@bot.message_handler(commands=['start'])
def start(message):
bot.send_message(message.chat.id, "Привет! Я бот-помощник) Выбери нужную функцию в меню, и я помогу тебе!", reply_markup=markup)
@bot.message_handler(commands=['help'])
def help(message):
bot.send_message(message.chat.id,"Выбери нужное действие в меню!")
def get_weather(message):
city = message.text
open_weather_token = "28c7e1a09c805fa0277d904c486ab90a"
try:
r = requests.get(
f"https://api.openweathermap.org/data/2.5/weather?q={city}&appid={open_weather_token}&units=metric&lang=ru")
data = r.json()
# pprint(data)
city = data["name"]
# description = data["weather"]["description"]
        temperature = data["main"]["temp"]
        feels_like = data["main"]["feels_like"]
humidity = data["main"]["humidity"]
pressure = data["main"]["pressure"]
wind = data["wind"]["speed"]
sunrise_timestamp = datetime.datetime.fromtimestamp(data["sys"]["sunrise"])
sunset_timestamp = datetime.datetime.fromtimestamp(data["sys"]["sunset"])
bot.send_message(message.chat.id, f"Погода в городе: {city}\nТемператра воздуха: {temperature}C\n"
f"Ощущается как: {feels_like}\n"
f"Влажность: {humidity}\nДавление: {pressure} мм.рт.ст\nСкорость ветра: {wind}\n"
f"Восход: {sunrise_timestamp}\nЗакат: {sunset_timestamp}")
except Exception as ex:
bot.send_message(message.chat.id, 'Ошибка в названии города\nПопробуйте снова')
bot.send_message(message.chat.id, 'Выберите действие в меню')
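# Illustrative subset (not from the original source) of the OpenWeatherMap JSON that
# get_weather() reads above; only these fields are used:
# {
#   "name": "Moscow",
#   "weather": [{"description": "clear sky"}],
#   "main": {"temp": 21.4, "feels_like": 20.8, "humidity": 40, "pressure": 1012},
#   "wind": {"speed": 3.5},
#   "sys": {"sunrise": 1654048000, "sunset": 1654110000}
# }
# With units=metric the API reports temperatures in °C; pressure is returned in hPa.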
def Wallpaper(message):
bot.send_message(message.chat.id, 'Ожидайте пару секунд)')
google_crawler = GoogleImageCrawler(storage={'root_dir': 'C:/Users/malig/питон'})
google_crawler.crawl(keyword=message.text, max_num=3)
a = random.randint(1, 3)
try:
photo = open('C:/Users/malig/питон/00000' + str(a) + '.jpg', 'rb')
bot.send_photo(message.chat.id, photo)
photo.close()
except Exception:
bot.send_message(message.chat.id, 'Возникла ошибка, попробуйте еще раз')
path = "C:/Users/malig/питон"
shutil.rmtree(path)
bot.send_message(message.chat.id, 'Выберите действие в меню')
def Wikipedia_text(message):
final_message = " "
word = message.text.strip().lower()
try:
final_message = wikipedia.summary(word)
except wikipedia.exceptions.PageError:
final_message = " По вашему запросу ничего не найдено "
bot.send_message(message.chat.id, final_message, parse_mode='html')
bot.send_message(message.chat.id, 'Выберите действие в меню')
@bot.message_handler(content_types=['text'])
def start(message):
if message.text=='Подобрать обои на рабочий стол':
mesg = bot.send_message(message.chat.id, 'Я бот, который поможет тебе подобрать обои для рабочего стола или просто красивую картинку.Напиши мне слово и я подберу картинку по твоему запросу!')
bot.register_next_step_handler(mesg, Wallpaper)
if message.text == 'Узнать информацию по слову из Википедии':
mess = bot.send_message(message.chat.id,
f'Привет, <b>{message.from_user.first_name} {message.from_user.last_name}</b>\n Введите ваш запрос и я найду статью из Википедии',
parse_mode='html')
bot.register_next_step_handler(mess, Wikipedia_text)
if message.text == 'Узнать погоду':
messs = bot.send_message(message.chat.id, "Введите название города")
bot.register_next_step_handler(messs, get_weather)
if message.text == 'Полезные ссылки для студента НГТУ':
buttons = [
types.InlineKeyboardButton(text="Список преподавателей", url="https://www.nntu.ru/sveden/employees"),
types.InlineKeyboardButton(text="Четность недели", callback_data='Сейчас лето! Хороших каникул!'),
types.InlineKeyboardButton(text="Проверить успеваемость", url="https://web.archive.org/web/20211109062603/https://www.nntu.ru/content/studentam/uspevaemost")
]
keyboard = types.InlineKeyboardMarkup(row_width=1)
keyboard.add(*buttons)
bot.send_message(message.chat.id, "Кнопки-ссылки", reply_markup=keyboard)
@bot.callback_query_handler(func=lambda call: call.data == 'Сейчас лето! Хороших каникул!')
def callback_inline(call):
bot.send_message(message.chat.id, 'Сейчас лето! Хороших каникул!')
if message.text == 'Узнать расписание':
buttons = [
types.InlineKeyboardButton(text="1 курс", callback_data='1'),
types.InlineKeyboardButton(text="2 курс", callback_data='2'),
types.InlineKeyboardButton(text="3 курс", callback_data='3'),
types.InlineKeyboardButton(text="4 курс", callback_data='4'),
types.InlineKeyboardButton(text="5 курс", callback_data='5')
]
keyboard = types.InlineKeyboardMarkup(row_width=1)
keyboard.add(*buttons)
bot.send_message(message.chat.id, "Выберите курс", reply_markup=keyboard)
@bot.callback_query_handler(func=lambda call: call.data == '1')
def callback_inline_first(call):
buttons = [
types.InlineKeyboardButton(text="ИРИТ",url="https://docs.google.com/spreadsheets/d/1J3d69QH49C96uCix7vat5-qH2WZKfIRT/edit#gid=26020349"),
types.InlineKeyboardButton(text="ИТС",url="https://docs.google.com/spreadsheets/d/1FFuHQipCE3iEQB6k-iMf7I9UIEinNF-v/edit#gid=1845133819"),
types.InlineKeyboardButton(text="ИЯЭиТФ",url="https://vk.com/nnstu_ftf"),
types.InlineKeyboardButton(text="ИНЭЛ",url="https://docs.google.com/spreadsheets/d/1hmVyfTeNKXkDyiBmoTjxJnK6eVGa-sCX/edit#gid=1464892436"),
types.InlineKeyboardButton(text="ИФХТиМ",url="https://docs.google.com/spreadsheets/d/1cY13jDq64F4yov6ZjKD42_vahTysavO0/edit#gid=1433259151"),
types.InlineKeyboardButton(text="ИПТМ",url="https://docs.google.com/spreadsheets/d/1qkcDeGdJ8WoCIvPkTFfst8sNv56JtGx6/edit#gid=425044725"),
types.InlineKeyboardButton(text="ИНЭУ",url="https://vk.com/ineungtu"),
]
keyboard = types.InlineKeyboardMarkup(row_width=1)
keyboard.add(*buttons)
bot.send_message(message.chat.id, "Кнопки-ссылки на расписание", reply_markup=keyboard)
@bot.callback_query_handler(func=lambda call: call.data == '2')
def callback_inline_second(call):
buttons = [
types.InlineKeyboardButton(text="ИРИТ", url="https://docs.google.com/spreadsheets/d/1mmxdRct2KB29ZfgFV-wioSI9MnwJtyqs/edit#gid=1113528126"),
types.InlineKeyboardButton(text="ИТС",url="https://docs.google.com/spreadsheets/d/1FFuHQipCE3iEQB6k-iMf7I9UIEinNF-v/edit#gid=1845133819"),
types.InlineKeyboardButton(text="ИЯЭиТФ",url="https://vk.com/nnstu_ftf"),
types.InlineKeyboardButton(text="ИНЭЛ",url="https://docs.google.com/spreadsheets/d/1hmVyfTeNKXkDyiBmoTjxJnK6eVGa-sCX/edit#gid=1464892436"),
types.InlineKeyboardButton(text="ИФХТиМ",url="https://docs.google.com/spreadsheets/d/1cY13jDq64F4yov6ZjKD42_vahTysavO0/edit#gid=1433259151"),
types.InlineKeyboardButton(text="ИПТМ",url="https://docs.google.com/spreadsheets/d/1qkcDeGdJ8WoCIvPkTFfst8sNv56JtGx6/edit#gid=425044725"),
types.InlineKeyboardButton(text="ИНЭУ",url="https://vk.com/ineungtu"),
]
keyboard = types.InlineKeyboardMarkup(row_width=1)
keyboard.add(*buttons)
bot.send_message(message.chat.id, "Кнопки-ссылки на расписание", reply_markup=keyboard)
@bot.callback_query_handler(func=lambda call: call.data == '3')
def callback_inline_third(call):
buttons = [
types.InlineKeyboardButton(text="ИРИТ",url="https://docs.google.com/spreadsheets/d/1dsrxSTJ-xuSbp5tYEZ59qUi9_IuSGBdK/edit#gid=1802215083"),
types.InlineKeyboardButton(text="ИТС",url="https://docs.google.com/spreadsheets/d/1FFuHQipCE3iEQB6k-iMf7I9UIEinNF-v/edit#gid=1845133819"),
types.InlineKeyboardButton(text="ИЯЭиТФ",url="https://vk.com/nnstu_ftf"),
types.InlineKeyboardButton(text="ИНЭЛ",url="https://docs.google.com/spreadsheets/d/18Y5JU4_IZnz4CLlUTjIAsqV4v5u8CWm7/edit#gid=341398593"),
types.InlineKeyboardButton(text="ИФХТиМ",url="https://docs.google.com/spreadsheets/d/1nf9jYVld--MZ-CswfkzoGjASGG3PGoNn/edit#gid=2111385652"),
types.InlineKeyboardButton(text="ИПТМ",url="https://docs.google.com/spreadsheets/d/1DcnWTg81G6qOw1MjuoW3EHUTNgnBI82M/edit#gid=1162863141"),
types.InlineKeyboardButton(text="ИНЭУ",url="https://vk.com/ineungtu"),
]
keyboard = types.InlineKeyboardMarkup(row_width=1)
keyboard.add(*buttons)
bot.send_message(message.chat.id, "Кнопки-ссылки на расписание", reply_markup=keyboard)
@bot.callback_query_handler(func=lambda call: call.data == '4')
def callback_inline_four(call):
buttons = [
types.InlineKeyboardButton(text="ИРИТ",url="https://docs.google.com/spreadsheets/d/1GiGh8Yc2xwlXjX3xkMZTI0IU-duxCFQQ/edit#gid=2007860904"),
types.InlineKeyboardButton(text="ИТС",url="https://docs.google.com/spreadsheets/d/1FFuHQipCE3iEQB6k-iMf7I9UIEinNF-v/edit#gid=1845133819"),
types.InlineKeyboardButton(text="ИЯЭиТФ",url="https://vk.com/nnstu_ftf"),
types.InlineKeyboardButton(text="ИНЭЛ",url="https://docs.google.com/spreadsheets/d/1Q6M7g_qkt-Lncj-yX2qyqe21CIIFjczS/edit#gid=2125922402"),
types.InlineKeyboardButton(text="ИФХТиМ",url="https://docs.google.com/spreadsheets/d/1nf9jYVld--MZ-CswfkzoGjASGG3PGoNn/edit#gid=2111385652"),
types.InlineKeyboardButton(text="ИПТМ",url="https://docs.google.com/spreadsheets/d/1DcnWTg81G6qOw1MjuoW3EHUTNgnBI82M/edit#gid=1162863141"),
types.InlineKeyboardButton(text="ИНЭУ",url="https://vk.com/ineungtu"),
]
keyboard = types.InlineKeyboardMarkup(row_width=1)
keyboard.add(*buttons)
bot.send_message(message.chat.id, "Кнопки-ссылки на расписание", reply_markup=keyboard)
@bot.callback_query_handler(func=lambda call: call.data == '5')
def callback_inline_four(call):
buttons = [
types.InlineKeyboardButton(text="ИРИТ",url="https://docs.google.com/spreadsheets/d/1QEGTgmpK39AmPL3DbUPMPCHdH-CLQa9j/edit#gid=1333036647"),
types.InlineKeyboardButton(text="ИТС",url="https://docs.google.com/spreadsheets/d/1FFuHQipCE3iEQB6k-iMf7I9UIEinNF-v/edit#gid=1845133819"),
types.InlineKeyboardButton(text="ИЯЭиТФ",url="https://vk.com/nnstu_ftf"),
types.InlineKeyboardButton(text="ИНЭЛ",url="такого курса нет"),
types.InlineKeyboardButton(text="ИФХТиМ",url="https://docs.google.com/spreadsheets/d/1nf9jYVld--MZ-CswfkzoGjASGG3PGoNn/edit#gid=2111385652"),
types.InlineKeyboardButton(text="ИПТМ",url="https://docs.google.com/spreadsheets/d/1DcnWTg81G6qOw1MjuoW3EHUTNgnBI82M/edit#gid=1162863141"),
types.InlineKeyboardButton(text="ИНЭУ",url="https://vk.com/ineungtu"),
]
keyboard = types.InlineKeyboardMarkup(row_width=1)
keyboard.add(*buttons)
bot.send_message(message.chat.id, "Кнопки-ссылки на расписание", reply_markup=keyboard)
bot.polling(none_stop=True) | Julkinis/telegram_bot | tgbot.py | tgbot.py | py | 13,760 | python | ru | code | 0 | github-code | 13 |
30701529654 | import numpy as np
import astropy
from astropy.io import fits
import matplotlib
import matplotlib.pyplot as plt
import m2fs_process as m2fs
import os
from isolate_model_result import Model
import scipy
from scipy.spatial import distance
import mycode
matplotlib.use('TkAgg')
from matplotlib.patches import Ellipse
from pymultinest.solve import solve
from pymultinest import Analyzer
#matplotlib.use('pdf')
data_directory='/hildafs/projects/phy200028p/mgwalker/m2fs_data/'
fits_list0='all_m2fshiresian_files'
fits_list='/hildafs/projects/phy200028p/mgwalker/m2fs/'+fits_list0
model_paras_files = "/hildafs/projects/phy200028p/mgwalker/m2fs/final_mask_MLP_clamped_76c0e105c350c062a69a_iter600.pt"
model = Model(model_paras_files)
lambdamin=5127.
lambdamax=5190.
liblambdamin0=5055.
liblambdamax0=5345.
dliblambda=0.05
nliblambda=int((liblambdamax0-liblambdamin0)/dliblambda)+1  # int() replaces np.long, which was removed in recent NumPy releases
liblambda=np.linspace(liblambdamin0,liblambdamax0,nliblambda)
input_paras=np.array([4321.,4.321,-3.21,-0.21])#pull dummy model so we can get wavelengths
specmodel=model(input_paras)
model_wav=model.wavelength
xa=[]
xb=[]
for i in range(0,len(model_wav)):
xa.append((model_wav[i],))
xb.append((model_wav[i],))
cdist2=distance.cdist(xa,xb,metric='sqeuclidean')
sigma0=np.arange(0.01,0.26,0.01)#smoothing bandwidth array for lookup table
weights=[]
sumweights=[]
for i in range(0,len(sigma0)):
weights0=np.exp(-0.5*cdist2/sigma0[i]**2)
weights.append(weights0)
sumweights.append(np.dot(weights0,np.ones(len(model_wav))))
weights=np.array(weights)
sumweights=np.array(sumweights)
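# The two arrays above form a lookup table of Gaussian smoothing kernels: for each trial
# bandwidth in sigma0, weights[i] holds exp(-0.5*d^2/sigma^2) between every pair of model
# wavelengths and sumweights[i] holds its row sums, so getmodelspec() can smooth a model
# spectrum with a single dot product instead of rebuilding the kernel on every call.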
with open(fits_list) as f:
data=f.readlines()
fitsfile=[]
obj=[]
for line in data:
p=line.split()
fitsfile.append(p[0])
obj.append(p[1])
fitsfile=np.array(fitsfile)
obj=np.array(obj)
for i in range(0,len(fitsfile)):
this=np.where(fitsfile==fitsfile[i])[0]
if len(this)>1:
print('ERROR: multiple listings of same observation in input file!!!')
print(this)
        raise RuntimeError('multiple listings of the same observation in input file')
def getmodelspec(cube,wav,spec,varspec):
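    # Layout of the `cube` parameter vector as unpacked below (for reference):
    #   0: deltav (radial velocity)   1: teff   2: logg   3: feh   4: alpha
    #   5-10: a0..a5, continuum polynomial coefficients
    #   11: sigma, smoothing bandwidth   12-13: v1, v2, wavelength-dependent velocity terms
    #   14-15: phantom, phantom2 (stored as log10), error-floor / variance-rescaling terms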
from isolate_model_result import Model
deltav=cube[0]
teff=cube[1]
logg=cube[2]
feh=cube[3]
alpha=cube[4]
a0=cube[5]
a1=cube[6]
a2=cube[7]
a3=cube[8]
a4=cube[9]
a5=cube[10]
sigma=cube[11]
v1=cube[12]
v2=cube[13]
phantom=10.**cube[14]
phantom2=10.**cube[15]
v=deltav
v0=0.
input_paras=np.array([teff,logg,feh,alpha])
specmodel=model(input_paras)
model_wav=model.wavelength
dist=(sigma-sigma0)**2
best=np.argsort(dist)[0]
smoothed=np.dot(specmodel,weights[best])/sumweights[best]
c=3.e+5
lambdascale=0.5*(lambdamax-lambdamin)
lambda0=lambdamin+lambdascale
ilambdascale=1./lambdascale
ic=1./c
isigma2=1./sigma**2
vmin=v-v0-v1-v2
vmax=v+v0+v1+v2
liblambdamin=lambdamin/(1.+vmax*ic)
liblambdamax=lambdamax/(1.+vmin*ic)
jmin=int((liblambdamin-liblambdamin0)/dliblambda)
jmax=int((liblambdamax-liblambdamin0)/dliblambda)+1
j0=np.arange(jmin,jmax,1,dtype='int')
velocity=v+v0+v1*((liblambda[j0]-lambda0)*ilambdascale)+v2*((liblambda[j0]-lambda0)*ilambdascale)**2
liblambdatwiddle=liblambda[j0]*(1.+velocity*ic)
maxobscounts=np.max(spec[((varspec>0.)&(varspec<1.e+10))])
polynomial=maxobscounts*a0+maxobscounts*(
a1*((wav-lambda0)*ilambdascale)**1
+a2*((wav-lambda0)*ilambdascale)**2
+a3*((wav-lambda0)*ilambdascale)**3
+a4*((wav-lambda0)*ilambdascale)**4
+a5*((wav-lambda0)*ilambdascale)**5)
# print(len(wav),len(liblambdatwiddle),len(smoothed),maxobscounts)
interp=np.interp(wav,liblambdatwiddle,smoothed[jmin:jmax])
return polynomial*interp
def run_nest(wav,spec,varspec,mask,prefix):
from isolate_model_result import Model
def myprior(cube):
prior=[]
prior.append([-500.,500.])#log of scaling constant that sets member fraction
prior.append([3900.,7500.])
prior.append([0.,5.])
prior.append([-4.,0.5])
prior.append([-0.8,1.])
prior.append([-1.,1.])
prior.append([-1.,1.])
prior.append([-1.,1.])
prior.append([-1.,1.])
prior.append([-1.,1.])
prior.append([-1.,1.])
prior.append([0.06,0.120])
prior.append([-10.,10.])
prior.append([-10.,10.])
prior.append([-1.,6.])
prior.append([-2.,2.])
prior=np.array(prior)
x=np.array(cube)
for i in range(0,len(x)):
x[i]=prior[i][0]+(prior[i][1]-prior[i][0])*cube[i]
return x
def myloglike(cube):
libsmooth=getmodelspec(cube,wav,spec,varspec)
# deltav=cube[0]
teff=cube[1]
logg=cube[2]
feh=cube[3]
alpha=cube[4]
# a0=cube[5]
# a1=cube[6]
# a2=cube[7]
# a3=cube[8]
# a4=cube[9]
# a5=cube[10]
# sigma=cube[11]
# v1=cube[12]
# v2=cube[13]
phantom=10.**cube[14]
phantom2=10.**cube[15]
# v=deltav
# v0=0.
# input_paras=np.array([teff,logg,feh,alpha])
# specmodel=model(input_paras)
# model_wav=model.wavelength
# weights=[]
# for j in range(0,len(model_wav)):
# weights.append(np.sum(np.exp(-0.5*cdist2[j]/sigma**2)))
# weights=np.array(weights)
# smoothed=[]
# for j in range(0,len(model_wav)):
# smoothed.append(np.sum(specmodel*np.exp(-0.5*cdist2[j]/sigma**2))/weights[j])
# smoothed=np.array(smoothed)
# c=3.e+5
# lambdascale=0.5*(lambdamax-lambdamin)
# lambda0=lambdamin+lambdascale
# ilambdascale=1./lambdascale
# ic=1./c
# isigma2=1./sigma**2
# vmin=v-v0-v1-v2
# vmax=v+v0+v1+v2
# liblambdamin=lambdamin/(1.+vmax*ic)
# liblambdamax=lambdamax/(1.+vmin*ic)
# jmin=int((liblambdamin-liblambdamin0)/dliblambda)
# jmax=int((liblambdamax-liblambdamin0)/dliblambda)+1
# j0=np.arange(jmin,jmax,1,dtype='int')
# velocity=v+v0+v1*((liblambda[j0]-lambda0)*ilambdascale)+v2*((liblambda[j0]-lambda0)*ilambdascale)**2
# liblambdatwiddle=liblambda[j0]*(1.+velocity*ic)
# maxobscounts=np.max(spec[((varspec>0.)&(varspec<1.e+10))])
# polynomial=maxobscounts*a0+maxobscounts*(
# a1*((wav-lambda0)*ilambdascale)**1
# +a2*((wav-lambda0)*ilambdascale)**2
# +a3*((wav-lambda0)*ilambdascale)**3
# +a4*((wav-lambda0)*ilambdascale)**4
# +a5*((wav-lambda0)*ilambdascale)**5)
# interp=np.interp(wav,liblambdatwiddle,smoothed[jmin:jmax])
# libsmooth=polynomial*interp
keep=np.where(varspec<1.e+8)[0]
sum1=-0.5*np.log(2.*np.pi)*int(len(keep))
sum2=-0.5*np.sum(np.log(phantom2*varspec[keep]+phantom**2))
sum3=-0.5*np.sum((spec[keep]-libsmooth[keep])**2/(phantom2*varspec[keep]+phantom**2))
logl=sum1+sum2+sum3
if ((feh<-2.5)&(logg>4.5)):
logl=-1.e+30
if ((teff>6000.)&(logg<1.)):
logl=-1.e+30
return logl
parameters=['1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16']
n_params=len(parameters)
result=solve(LogLikelihood=myloglike,Prior=myprior,n_dims=n_params,outputfiles_basename=prefix,verbose=True,resume=True,init_MPI=False,use_MPI=False)
return result
for i in range(0,len(fitsfile)):
print(i,' of ',len(fitsfile))
hdul=fits.open(fitsfile[i])
fitsobject=m2fs.m2fs_getfromfits(hdul)
filtername=hdul[0].header['filtername']
m2fsrun=hdul[0].header['m2fsrun']
field_name=hdul[0].header['field_name']
temperature=hdul[0].header['dome_temp']
hdul.close()
root=[]
root2=[]
for j in range(0,len(fitsobject.obj)):
root.append('m2fs_ian_'+m2fsrun+'_'+obj[i]+'_'+field_name+'_'+filtername+'_ra'+str.format('{0:.6f}',round(fitsobject.radeg[j],6)).zfill(6)+'_dec'+str.format('{0:.6f}',round(fitsobject.decdeg[j],6)).zfill(6)+'_hjd'+str.format('{0:.3f}',round(fitsobject.hjd[j],3))+'_ap'+fitsobject.channel[j]+str(int(fitsobject.aperture[j])).zfill(3))
root2.append('m2fs_ian_'+m2fsrun+'_'+obj[i]+'_'+field_name+'_'+filtername+'_ra'+str.format('{0:.6f}',round(fitsobject.radeg[j],6)).zfill(6)+'_dec'+str.format('{0:.6f}',round(fitsobject.decdeg[j],6)).zfill(6)+'_hjd'+str.format('{0:.3f}',round(fitsobject.hjd[j],3))+'_ap'+fitsobject.channel[j]+str(int(fitsobject.aperture[j])).zfill(3))
root=np.array(root)
root2=np.array(root2)
skies=np.where(fitsobject.obj=='SKY')[0]
targets=np.where(fitsobject.icode>0)[0]
if len(targets)==0:
print('WARNING: NO TARGETS')
for j in targets:
out=data_directory+root[j]+'_skysub.dat'
if ((len(np.where(fitsobject.mask[j]==False)[0])>0)&(fitsobject.filtername[j]=='HiRes')):#write skysub.dat file only if >100 good pixels in spectrum
wav=[]
spec=[]
varspec=[]
mask=[]
for k in range(0,len(fitsobject.wav[j])):
wav.append(fitsobject.wav[j][k])
spec.append(fitsobject.spec[j][k])
varspec.append(fitsobject.var[j][k])
mask.append(fitsobject.mask[j][k])
wav=np.array(wav)
spec=np.array(spec)
varspec=np.array(varspec)
mask=np.array(mask)
cube=[]
cube.append(0.)#log of scaling constant that sets member fraction
cube.append(5800.)
cube.append(4.7)
cube.append(0.)
cube.append(0.)
cube.append(0.1)
cube.append(0.2)
cube.append(0.3)
cube.append(0.4)
cube.append(0.5)
cube.append(0.6)
cube.append(0.09)
cube.append(0.)
cube.append(0.)
cube.append(0.)
cube.append(0.)
cube=np.array(cube)
# shite=getmodelspec(cube,wav,spec,varspec)
# np.pause()
prefix='/hildafs/projects/phy200028p/mgwalker/m2fs/chains/'+root2[j]
shite=run_nest(wav,spec,varspec,mask,prefix)
# a=Analyzer(n_params,outputfiles_basename=prefix)
# bestfit=a.get_best_fit()
| mgwalkergit/spec | m2fs_fitspectra.py | m2fs_fitspectra.py | py | 10,297 | python | en | code | 0 | github-code | 13 |
35256705805 | import logging
import os
from logging.handlers import RotatingFileHandler
from pathlib import Path
from mb_commons import Scheduler
from app.config import AppConfig
from app.core.db import DB
from app.core.services.system_service import SystemService
from app.core.services.worker_service import WorkerService
class Core:
def __init__(self, config: AppConfig):
self.config = config
self.log = logging.getLogger("app")
self.init_logger()
self.db: DB = DB(config.database_url)
self.system_service: SystemService = SystemService(config, self.log, self.db)
self.worker_service: WorkerService = WorkerService(config, self.log, self.db, self.system_service)
self.scheduler = self.init_scheduler()
self.startup()
self.log.info("app started")
def init_scheduler(self) -> Scheduler:
scheduler = Scheduler(self.log)
scheduler.add_job(self.worker_service.process_workers, 2)
scheduler.start()
self.log.debug("scheduler started")
return scheduler
def init_logger(self):
Path(self.config.data_dir).mkdir(exist_ok=True)
self.log.setLevel(logging.DEBUG if self.config.debug else logging.INFO)
self.log.propagate = False
fmt = logging.Formatter(fmt="%(asctime)s - %(name)s - %(levelname)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(fmt)
self.log.addHandler(console_handler)
file_handler = RotatingFileHandler(f"{self.config.data_dir}/app.log", maxBytes=10 * 1024 * 1024, backupCount=1)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(fmt)
self.log.addHandler(file_handler)
def startup(self):
pass
def shutdown(self):
self.scheduler.stop()
self.db.close()
self.log.info("app stopped")
# noinspection PyUnresolvedReferences,PyProtectedMember
os._exit(0)
| max-block/demo-fastapi | app/core/core.py | core.py | py | 2,053 | python | en | code | 0 | github-code | 13 |
70942051217 | import random
guess = ''
answers = ('heads', 'tails')
while guess not in answers:
print('Guess the coin toss! Enter heads or tails:')
guess = input()
toss = random.randint(0, 1) # 0 is tails, 1 is heads
if answers[toss] == guess:
print('You got it!')
else:
print('Nope! Guess again!')
guess = input()
if answers[toss] == guess:
print('You got it!')
else:
print('Nope. You are really bad at this game.')
| danhuynhdev/automateboringstuff | chapter10/debug.py | debug.py | py | 448 | python | en | code | 0 | github-code | 13 |
6916789067 | class Neighbors:
def __init__(self, nbs):
self.nw = nbs[0]
self.n = nbs[1]
self.ne = nbs[2]
self.w = nbs[3]
self.c = nbs[4]
self.e = nbs[5]
self.sw = nbs[6]
self.s = nbs[7]
self.se = nbs[8]
@staticmethod
def get_neighbors(x, y, img):
(w,h) = img.size
neighbors_list = []
for y_offset in [-1, 0, 1]:
for x_offset in [-1, 0, 1]:
px = (0,0,0,0)
try:
px = img.getpixel((x+x_offset,y+y_offset))
                except Exception:  # out-of-range coordinates fall back to the default pixel
pass
neighbors_list.append(px)
return Neighbors(neighbors_list)
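    # Minimal usage sketch (assumes a PIL.Image; not part of the original file):
    #   from PIL import Image
    #   img = Image.open("map.png").convert("RGBA")
    #   nbs = Neighbors.get_neighbors(10, 12, img)
    #   nbs.c            -> centre pixel at (10, 12)
    #   nbs.different()  -> [(direction, pixel), ...] for the 4-neighbours that differ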
def different(self):
res = []
if self.n != self.c:
res.append(('n', self.n))
if self.e != self.c:
res.append(('e', self.e))
if self.w != self.c:
res.append(('w', self.w))
if self.s != self.c:
res.append(('s', self.s))
return res
def code(self):
return 'context-{}-{}-{}-{}-{}-{}-{}-{}-{}'.format(
str(self.nw),
str(self.n),
str(self.ne),
str(self.w),
str(self.c),
str(self.e),
str(self.sw),
str(self.s),
str(self.se)
)
| steven-gomez/mapkernel | src/neighbors.py | neighbors.py | py | 1,347 | python | en | code | 0 | github-code | 13 |
27236893047 | from typing import Union, Sequence, List, Tuple, Optional, Dict, Any, Iterator
from abc import ABC, abstractmethod
import numpy
import sys
import logging
from mini_op2.framework.core import *
from mini_op2.framework.system import SystemInstance, SystemSpecification
from mini_op2.framework.user_code_parser import scan_code, VarUses
class Statement(ABC):
def _eval_statements(self, instance:SystemInstance, stats:Sequence["Statement"]):
for s in stats:
s.execute(instance)
def _import_statements(self, spec:SystemSpecification, stats:Sequence["Statement"]) -> Sequence["Statement"]:
global _scalar_statement_unq
res=[]
for s in stats:
if isinstance(s,str):
uses=scan_code(spec,s)
unq=_scalar_statement_unq
_scalar_statement_unq+=1
args=[ GlobalArgument(mode,spec.globals[var]) for (var,mode) in uses.get_args() ]
name="controller_{}".format(unq)
(func,ast)=uses.create_func_and_ast(name)
stat=UserCode( func, ast, *args)
stat.id=name
res.append(stat)
else:
res.append(s)
return res
def _on_bind_spec_local(self, spec:SystemSpecification) -> None:
pass
def __init__(self):
# Used to assign globally unique ids later one
self.id=None # type: Optional[str]
def on_bind_spec(self, spec:SystemSpecification) -> "Statement":
self._on_bind_spec_local(spec)
for s in self.children():
s.on_bind_spec(spec)
return self
def children(self) -> Iterator["Statement"]:
yield from []
def all_statements(self) -> Iterator["Statement"]:
yield self
for s in self.children():
yield from s.all_statements()
@abstractmethod
def execute(self, instance:SystemInstance) -> None:
raise NotImplementedError()
class UsesStatement(Statement):
"""This is a stupid name. It is code that was turned from a string into a function for scalar code."""
    def __init__(self, uses:VarUses) -> None:
        super().__init__()
        self.uses=uses
    def children(self) -> Iterator[Statement]:
        yield from []
def execute(self, instance:SystemInstance) -> None:
self.uses.execute(instance)
_scalar_statement_unq=0
class CompositeStatement(Statement):
    def __init__(self):
        super().__init__()
class Seq(CompositeStatement):
def __init__(self, *statements:Statement ) -> None:
super().__init__()
self.statements=list(statements) # type:Sequence[Statement]
def _on_bind_spec_local(self, spec:SystemSpecification) -> None:
self.statements=self._import_statements(spec, self.statements)
def children(self) -> Iterator[Statement]:
yield from self.statements
def execute(self, instance:SystemInstance) -> None:
self._eval_statements(instance, self.statements)
class Par(CompositeStatement):
def __init__(self, *statements:Statement) -> None:
super().__init__()
self.statements=list(statements)
def _on_bind_spec_local(self, spec:SystemSpecification) -> None:
self.statements=self._import_statements(spec, self.statements)
def children(self) -> Iterator[Statement]:
yield from self.statements
def execute(self, instance:SystemInstance) -> None:
self._eval_statements(instance, self.statements)
class RepeatForCount(CompositeStatement):
def __init__(self, count:int, variable:MutableGlobal, *statements:Statement) -> None:
super().__init__()
self.count=count
self.variable=variable
self.statements=list(statements)
def _on_bind_spec_local(self, spec:SystemSpecification) -> None:
self.statements=self._import_statements(spec, self.statements)
def children(self) -> Iterator[Statement]:
yield from self.statements
def execute(self, instance:SystemInstance) -> None:
for i in range(self.count):
instance.globals[self.variable][0]=i # Update global
self._eval_statements(instance, self.statements)
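# Rough usage sketch (hypothetical names, not from the original source): statements nest, e.g.
#   program = Seq(
#       RepeatForCount(10, spec.globals["i"],
#           ParFor(kernel_fn, iter_set, *kernel_args),
#           "t = t + dt",   # bare strings are turned into UserCode by _import_statements()
#       ),
#   )
#   program.on_bind_spec(spec)
#   program.execute(instance)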
class Execute(Statement):
def __init__(self):
super().__init__()
def _get_current(self, instance:SystemInstance, iter_index:Union[int,None], arg:Argument):
"""Get the current value of the argument.
iter_index : Set index in a parallel context, None in a scalar context
"""
if isinstance(arg,GlobalArgument):
current=instance.globals[arg.global_]
elif isinstance(arg,DirectDatArgument):
assert iter_index is not None
current=instance.dats[arg.dat][iter_index]
elif isinstance(arg,IndirectDatArgument):
assert iter_index is not None
map=instance.maps[arg.map]
if arg.index>=0:
indirect_index=map[iter_index][arg.index]
current=instance.dats[arg.dat][indirect_index]
else:
assert arg.index == -arg.map.arity
dat=instance.dats[arg.dat]
current=[ dat[map[iter_index][i]] for i in range(arg.map.arity) ]
#print("indirect {} = {}".format(arg.dat.id,current))
else:
raise RuntimeError("Unknown arg type.")
return current
def _get_all_current(self, instance:SystemInstance, iter_index:Union[int,None], args:List[Argument]):
return [self._get_current(instance, iter_index, arg) for arg in args ]
def _arg_pre(self, instance:SystemInstance, arg:Argument, current:numpy.ndarray) -> numpy.ndarray:
"""For a given argument and value, prepare the input to the kernel.
"""
if arg.access_mode==AccessMode.INC:
if isinstance(arg,IndirectDatArgument) and arg.index < 0:
return [arg.data_type.create_default_value() for x in current]
else:
return arg.data_type.create_default_value() # Create zeros
elif arg.access_mode==AccessMode.WRITE:
# Scramble current value
if isinstance(arg,IndirectDatArgument) and arg.index < 0:
for i in range(-arg.index):
numpy.copyto(current[i], arg.data_type.create_random_value()) # type:ignore
else:
numpy.copyto(current, arg.data_type.create_random_value()) # type:ignore
return current
elif arg.access_mode==AccessMode.READ:
if isinstance(arg,IndirectDatArgument) and arg.index < 0:
return [current[i].copy() for i in range(len(current))]
else:
return current.copy()
elif arg.access_mode==AccessMode.RW:
return current
else:
raise RuntimeError("Unknown access mode.")
def _all_args_pre(self, instance:SystemInstance, args:List[Argument], current:Sequence[numpy.ndarray]):
res=[]
for (i,(arg,val)) in enumerate(zip(args,current)):
try:
res.append(self._arg_pre(instance, arg, val))
except Exception as e:
raise RuntimeError("Couldn't prepare argument index {} with arg={} and val={}".format(i,arg,val)) from e
return res
def _arg_post(self, instance:SystemInstance, arg:Argument, current:numpy.ndarray, new:numpy.ndarray) -> None:
"""For a given argument and previous value, apply the result from the kernel."""
if arg.access_mode==AccessMode.INC:
if isinstance(arg,IndirectDatArgument) and arg.index < 0:
for i in range(len(current)):
arg.data_type.inc_value(current[i], new[i])
else:
arg.data_type.inc_value(current, new)
elif arg.access_mode==AccessMode.WRITE:
pass # Should have been modified in place, otherwise left random
elif arg.access_mode==AccessMode.READ:
if isinstance(arg,IndirectDatArgument) and arg.index < 0:
for i in range(len(current)):
assert (current[i]==new[i]).all()
else:
assert (current==new).all()
elif arg.access_mode==AccessMode.RW:
pass # Will have modified in place if it wanted to
else:
raise RuntimeError("Unknown access mode.")
def _all_args_post(self, instance:SystemInstance, args:List[Argument], current:Sequence[numpy.ndarray], newVals:Sequence[numpy.ndarray]) -> None:
for (arg,val,new) in zip(args,current,newVals):
self._arg_post(instance, arg, val, new)
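    # Summary of how _arg_pre/_arg_post realise the access modes above:
    #   INC   - kernel sees zeros, its output is accumulated onto the stored value
    #   WRITE - stored value is scrambled first, so a kernel that forgets to write is caught
    #   READ  - kernel sees a copy, which is asserted unchanged afterwards
    #   RW    - kernel works on the stored value in place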
class While(Execute):
def _init_func(self, spec:SystemSpecification):
global _scalar_statement_unq
code="_cond_[0] = "+self.expression
uses=scan_code(spec,code)
unq=_scalar_statement_unq
_scalar_statement_unq+=1
args=[ GlobalArgument(mode,spec.globals[var]) for (var,mode) in uses.get_args() ]
name="controller_expr_{}".format(unq)
(func,ast)=uses.create_func_and_ast(name)
self.id=name
self.expr_ast=ast
self.expr_func=func
self.arguments=args
def __init__(self, expression:str, *statements:Statement) -> None:
super().__init__()
self.expression=expression
self.statements=list(statements)
self.id=None
self.expr_ast=None
self.expr_func=None
self.arguments=None
def children(self) -> Iterator[Statement]:
yield from self.statements
def _on_bind_spec_local(self, spec:SystemSpecification) -> None:
self._init_func(spec)
self.statements=self._import_statements(spec, self.statements)
def execute(self, instance:SystemInstance) -> None:
if self.expr_func is None:
self._init_func(instance.spec)
while True:
current=self._get_all_current(instance, None, self.arguments)
vals=self._all_args_pre(instance, self.arguments, current)
self.expr_func(*vals)
self._all_args_post(instance, self.arguments, current, vals)
logging.info("globals= %s", ",".join([g.id for g in instance.globals]))
val=instance.globals[instance.spec.globals["_cond_"]]
if not val:
break
self._eval_statements(instance, self.statements)
class ParFor(Execute):
def __init__(self,
kernel:Callable[...,None],
iter_set:Set,
*arguments:Argument
) -> None :
super().__init__()
for (i,a) in enumerate(arguments):
if isinstance(a,DatArgument):
assert a.iter_set==iter_set
if hasattr(kernel,"__name__"):
self.name=kernel.__name__
else:
self.name="<unknown>"
self.kernel=kernel
self.iter_set=iter_set
self.arguments=list(arguments)
def execute(self, instance:'SystemInstance') -> None:
iter_size=instance.sets[self.iter_set]
try:
for ci in range(iter_size):
current=self._get_all_current(instance, ci, self.arguments)
vals=self._all_args_pre(instance, self.arguments, current)
self.kernel(*vals)
self._all_args_post(instance, self.arguments, current, vals)
except Exception as e:
raise RuntimeError("While executing kernel {} over set {}".format(self.name, self.iter_set.id)) from e
class UserCode(Execute):
def __init__(self,
code:Callable[...,None],
ast:any,
*arguments:Argument
) -> None :
super().__init__()
assert ast
for (i,a) in enumerate(arguments):
assert isinstance(a,GlobalArgument)
self.code=code
self.ast=ast
self.arguments=list(arguments)
def execute(self, instance:'SystemInstance') -> None:
current=self._get_all_current(instance, None, self.arguments)
vals=self._all_args_pre(instance, self.arguments, current)
self.code(*vals)
self._all_args_post(instance, self.arguments, current, vals)
class Debug(Statement):
def __init__(self,
callback:Callable[[SystemInstance],None]
) -> None :
super().__init__()
self.callback=callback
def execute(self, instance:'SystemInstance') -> None:
self.callback(instance)
class CheckState(Statement):
def __init__(self,
src:Any,
pattern:str
) -> None:
super().__init__()
self.src=src
self.pattern=pattern
def execute(self, instance:'SystemInstance') -> None:
globals={ g.id:instance.globals[g][0] for g in instance.spec.globals.values() }
pattern=self.pattern.format(**globals)
logging.debug("Checking for reference pattern {}".format(pattern))
instance.check_snapshot_if_present(instance.spec, self.src, pattern)
| joshjennings98/fyp | graph_schema-4.2.0/apps/nursery/op2/mini_op2/framework/control_flow.py | control_flow.py | py | 13,165 | python | en | code | 0 | github-code | 13 |
22645880702 | import time
from retry import retry
from threadlocal_aws.clients import ec2, route53
from threadlocal_aws.resources import ec2 as ec2_resource
from ec2_utils.instance_info import info
def associate_eip(
eip=None, allocation_id=None, eip_param=None, allocation_id_param=None
):
if not allocation_id:
if eip:
address_data = ec2().describe_addresses(PublicIps=[eip])
if (
"Addresses" in address_data
and len(address_data["Addresses"]) > 0
and "AllocationId" in address_data["Addresses"][0]
):
allocation_id = address_data["Addresses"][0]["AllocationId"]
if not allocation_id:
if not allocation_id_param:
allocation_id_param = "paramEipAllocationId"
allocation_id = info().stack_data(allocation_id_param)
if not allocation_id:
if not eip:
if not eip_param:
eip_param = "paramEip"
eip = info().stack_data(eip_param)
address_data = ec2().describe_addresses(PublicIps=[eip])
if (
"Addresses" in address_data
and len(address_data["Addresses"]) > 0
and "AllocationId" in address_data["Addresses"][0]
):
allocation_id = address_data["Addresses"][0]["AllocationId"]
print("Allocating " + allocation_id + " on " + info().instance_id())
ec2().associate_address(
InstanceId=info().instance_id(),
AllocationId=allocation_id,
AllowReassociation=True,
)
info().clear_cache()
def create_eni(subnet_id):
iface = ec2_resource().create_network_interface(SubnetId=subnet_id)
return _retry_eni_status(iface.id, "available")
def get_eni(eni_id):
return ec2_resource().NetworkInterface(eni_id)
def list_attachable_enis():
return ec2_resource().network_interfaces.filter(
Filters=[
{"Name": "availability-zone", "Values": [info().availability_zone()]},
{"Name": "status", "Values": ["available"]},
]
)
def list_attachable_eni_ids():
return [eni.id for eni in list_attachable_enis()]
def list_compatible_subnets():
return ec2_resource().subnets.filter(
Filters=[{"Name": "availability-zone", "Values": [info().availability_zone()]}]
)
def list_compatible_subnet_ids():
return [subnet.id for subnet in list_compatible_subnets()]
def attach_eni(eni_id):
iface = ec2_resource().NetworkInterface(eni_id)
iface.attach(
DeviceIndex=info().next_network_interface_index(),
InstanceId=info().instance_id(),
)
iface = _retry_eni_status(iface.id, "in-use")
info().clear_cache()
return iface
def detach_eni(eni_id, delete=False):
iface = ec2_resource().NetworkInterface(eni_id)
iface.detach()
if iface.status != "available":
iface = _retry_eni_status(iface.id, "available")
time.sleep(3)
if delete:
iface.delete()
info().clear_cache()
@retry(tries=60, delay=2, backoff=1)
def _retry_eni_status(eni_id, status):
iface = ec2_resource().NetworkInterface(eni_id)
if iface.status != status:
raise Exception("eni " + eni_id + " not " + status)
return iface
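# Minimal usage sketch of the ENI helpers above (illustrative only; the subnet id is hypothetical):
#   eni = create_eni("subnet-0123456789abcdef0")   # waits until the interface is "available"
#   attach_eni(eni.id)                             # attaches at the next free device index
#   detach_eni(eni.id, delete=True)                # detaches, waits, then deletes it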
def register_private_dns(dns_name, hosted_zone, ttl=None, private_ip=None):
if not ttl:
ttl = 60
else:
ttl = int(ttl)
if not private_ip:
private_ip = info().private_ip()
zone_id = None
zone_paginator = route53().get_paginator("list_hosted_zones")
for page in zone_paginator.paginate():
for zone in page.get("HostedZones", []):
if zone["Name"] == hosted_zone:
zone_id = zone["Id"]
break
if zone_id:
break
if not zone_id:
raise Exception("Failed to get zone id for zone " + hosted_zone)
route53().change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch={
"Changes": [
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": dns_name,
"Type": "A",
"TTL": ttl,
"ResourceRecords": [{"Value": private_ip}],
},
}
]
},
)
| NitorCreations/ec2-utils | ec2_utils/interface.py | interface.py | py | 4,325 | python | en | code | 1 | github-code | 13 |
71253828819 | from typing import List
from copy import deepcopy
import torch
import torch.nn as nn
from catalyst import utils
def get_network(params):
params = deepcopy(params)
if(params["type"] == "lstm"):
return _get_lstm_net(**params)
else:
return _get_linear_net(**params)
# Here we define our model as a class
class LSTM(nn.Module):
def __init__(self, input_dim, hidden_dim, seq_len,
num_layers=2,activation_fn = nn.ReLU):
super(LSTM, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.seq_len = seq_len # sequation dim
self.num_layers = num_layers
# Building your LSTM
# batch_first=True causes input/output tensors to be of shape
# (batch_dim, seq_dim, feature_dim)
self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
# Readout layer
self.fc = nn.Linear(hidden_dim, hidden_dim)
def init_hidden(self, batch_size):
# This is what we'll initialise our hidden state as
return (torch.zeros(self.num_layers, batch_size, self.hidden_dim),
torch.zeros(self.num_layers, batch_size, self.hidden_dim))
def forward(self, input):
try:
if(len(input.shape) == 2 ):
if(input.shape[0] == 1):
                    input = input.reshape(self.seq_len, -1).unsqueeze(0)  # seq_len was hard-coded to 5 here
else:
input = input.reshape(input.shape[0],self.seq_len,-1)
batch_size, _, _ = input.shape
(h0,c0) = self.init_hidden(batch_size)
# Forward pass through LSTM layer
# shape of lstm_out: [input_size, batch_size, hidden_dim]
# shape of self.hidden: (a, b), where a and b both
# have shape (num_layers, batch_size, hidden_dim).
lstm_out, self.hidden = self.lstm(input,(h0.detach(), c0.detach()))
# Only take the output from the final timetep and passit through fc
return self.fc(lstm_out[:,-1,:])
        except Exception:
            # debugging hook left in place: drop into ipdb if the reshape/LSTM forward pass fails
            import ipdb; ipdb.set_trace()
def _get_lstm_net(
type: "lstm",
in_features: int,
hidden_dim: int,
num_layers: int,
history_len: int = 1,
features: List = None,
#use_bias: bool = False, # Not needed
#use_normalization: bool = False, # Not needed
#use_dropout: bool = False,
activation: str = "ReLU"
) -> nn.Module:
activation_fn = torch.nn.__dict__[activation]
net = LSTM(in_features, hidden_dim, history_len, num_layers, activation_fn)
return net
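# Illustrative params dict for get_network()/_get_lstm_net (values are made up):
#   {"type": "lstm", "in_features": 97, "hidden_dim": 128, "num_layers": 2,
#    "history_len": 5, "activation": "ReLU"}
# history_len is the sequence length the flattened observation is reshaped into in LSTM.forward().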
def _get_linear_net(
type: "linear",
in_features: int,
history_len: int = 1,
features: List = None,
use_bias: bool = False,
use_normalization: bool = False,
use_dropout: bool = False,
activation: str = "ReLU"
) -> nn.Module:
features = features or [16, 32, 16]
activation_fn = torch.nn.__dict__[activation]
def _get_block(**linear_params):
layers = [nn.Linear(**linear_params)]
if use_normalization:
layers.append(nn.LayerNorm(linear_params["out_features"]))
if use_dropout:
layers.append(nn.Dropout(p=0.1))
layers.append(activation_fn(inplace=True))
return layers
features.insert(0, history_len * in_features)
params = []
for i, (in_features, out_features) in enumerate(utils.pairwise(features)):
params.append(
{
"in_features": in_features,
"out_features": out_features,
"bias": use_bias,
}
)
layers = []
for block_params in params:
layers.extend(_get_block(**block_params))
net = nn.Sequential(*layers)
net.apply(utils.create_optimal_inner_init(activation_fn))
return net
class StateNet(nn.Module):
def __init__(
self,
main_net: nn.Module,
):
super().__init__()
self.main_net = main_net
def forward(self, state):
batch_size, _, _ = state.shape
x = state.contiguous().view(batch_size, -1)
x = self.main_net(x)
return x
@classmethod
def get_from_params(
cls,
# aggregation_net_params=None,
main_net_params=None,
) -> "StateNet":
main_net_params = deepcopy(main_net_params)
main_net = _get_lstm_net(**main_net_params)
net = cls(
main_net=main_net
)
return net
class StateActionNet(nn.Module):
def __init__(
self,
state_net: nn.Module = None,
action_net: nn.Module = None,
main_net: nn.Module = None
):
super().__init__()
self.main_net = main_net
self.action_net = action_net
self.state_net = state_net
def forward(self, state, action):
state = self.state_net(state)
action = self.action_net(action)
x = torch.cat([state, action], dim=1)
x = self.main_net(x)
return x
@classmethod
def get_from_params(
cls,
state_net_params=None,
action_net_params=None,
main_net_params=None,
) -> "StateActionNet":
state_net = get_network(state_net_params)
main_net = get_network(main_net_params)
action_net = get_network(action_net_params)
net = cls(
state_net=state_net,
action_net=action_net,
main_net=main_net
)
return net
| denizdurduran/dicmar | src/network_lstm.py | network_lstm.py | py | 5,475 | python | en | code | 1 | github-code | 13 |
24574382243 | # Exercise 092 from the Python course - Curso em vídeo
# Write a program that reads a person's name, year of birth and work-permit (CTPS) number and registers them
# (together with their age) in a dictionary. If the CTPS number is different from ZERO,
# the dictionary also receives the year of hiring and the salary.
# Besides the age, compute and add the age at which the person will retire.
# My code
from datetime import datetime
print('=' * 37)
print(' ' * 2 + 'Cadastro de Trabalhador em Python')
print('=' * 37)
trabalhador = dict()
trabalhador['Nome'] = str(input('Nome: ').title().strip())
nasc = int(input('Ano de Nascimento: '))
trabalhador['Idade'] = datetime.now().year - nasc
trabalhador['CTPS'] = int(input('Carteira de Trabalho (0 não tem): '))
if trabalhador['CTPS'] != 0:
trabalhador['Contratação'] = int(input('Ano de contratação: '))
trabalhador['Salário'] = float(input('Salário: R$'))
    # Correction: it is not the year, but the age at which the person retires:
trabalhador['Aposentadoria'] = (trabalhador['Contratação'] + 35) - nasc
    # Minimum retirement age, assuming the worker is male.
if trabalhador['Aposentadoria'] < 65:
trabalhador['Aposentadoria'] = 65
print('-=' * 30)
for i, v in trabalhador.items():
print(f'{i}: {v}')
# Reviewed - OK
| felipecabraloliveira/Python | curso-de-python-curso-em-video/scripts/exercicios/ex092.py | ex092.py | py | 1,268 | python | pt | code | 0 | github-code | 13 |
24463290429 | from pandas import read_csv
from enum import Enum
class Locations(Enum):
"""Encapsulates store location strings"""
BACKGROUND = "#"
CHECKOUT = "C"
CUSTOMER = "K"
DAIRY = "D"
DRINKS = "L"
ENTRANCE = "G"
EXIT = "E"
FRUIT = "F"
SPICES = "S"
# Paths
PATH_SUPERMARKETMAP = "images/supermarket.png"
PATH_TILES = "images/tiles.png"
# Simulation
SIMULATION_DURATION = 20 # Duration of simulation
CUSTOMER_ARRIVAL_RATE = (0, 3) # min max
TRANS_PROB_MATRIX = read_csv("data/transitional_probabilities.csv").set_index("before")
# Visualization
MARKET = """
####################
##................##
##L..LD..DS..SF..F##
##L..LD..DS..SF..F##
##L..LD..DS..SF..F##
##L..LD..DS..SF..F##
##L..LD..DS..SF..F##
##................##
##...CC..CC..CC...##
##...CC..CC..CC...##
##................##
###E############G###
""".strip()
STORE_LOCATIONS = {
Locations.CHECKOUT.value: ((4, 15), (8, 9)),
Locations.DAIRY.value: ((7, 8), (2, 6)),
Locations.DRINKS.value: ((3, 4), (2, 6)),
Locations.FRUIT.value: ((15, 16), (2, 6)),
Locations.EXIT.value: ((11, 12), (2, 6)),
Locations.ENTRANCE.value: ((16, 16), (11, 11)),
Locations.SPICES.value: ((11, 12), (2, 6)),
}
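# Inferred convention (not documented in the original): each STORE_LOCATIONS entry appears to map
# a tile letter to ((x_min, x_max), (y_min, y_max)) index ranges on the MARKET grid above.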
TILE_SIZE = 32
UNWALKABLES = ["#"]
| MichlF/projects | data_science/supermarket_markov_simulation/config.py | config.py | py | 1,249 | python | en | code | 1 | github-code | 13 |
14610928708 | from datetime import datetime
from typing import List
from fastapi.logger import logger
from pydantic import parse_obj_as
from sqlalchemy.ext.asyncio import AsyncSession
from app import schemas
from app.controllers import note_controller
from app.models import Note
from app.utils import helpers
async def get_by_id(db: AsyncSession, note_id: int) -> schemas.Note:
data = await note_controller.get_by_id(db, note_id)
return data
async def get_all(db: AsyncSession) -> List[schemas.Note]:
data = await note_controller.get_all(db)
return parse_obj_as(List[schemas.Note], data)
async def create_note(db: AsyncSession, data: schemas.NoteCreate) -> Note:
return await note_controller.create(db, data)
async def update_note(db: AsyncSession, data: schemas.NoteUpdate) -> Note:
note = await note_controller.get_by_id(db, data.id)
try:
note.title = data.title
note.note = data.note
note.updated_at = datetime.now()
await helpers.flush_database(db)
except Exception as e:
logger.error(e)
raise RuntimeError("Can not update note")
return note
async def delete_note(db: AsyncSession, note_id: int):
note = await note_controller.get_by_id(db, note_id)
try:
await db.delete(note)
await helpers.flush_database(db)
except Exception as e:
logger.error(e)
raise RuntimeError("Can not delete the note")
def _validate_data(data: schemas.NoteBase):
data.title = data.title.strip()
if len(data.title) < 2:
raise RuntimeError("The name is too short")
if len(data.title) > 254:
raise RuntimeError("The name is too long")
| quangpq/fastapi-async-sqlalchemy | app/api/notes/controller.py | controller.py | py | 1,673 | python | en | code | 0 | github-code | 13 |
9500200508 | class translator:
def deciToRoman(self, num):
val = [1000, 900, 500, 400,100, 90, 50, 40,10, 9, 5, 4,1]
syb = ["M", "CM", "D", "CD","C", "XC", "L", "XL","X", "IX", "V", "IV","I"]
roman = ''
i = 0
while num > 0:
for _ in range(num // val[i]):
roman += syb[i]
num -= val[i]
i += 1
return roman
def romanToDeci(self, s):
roman = {'I':1,'V':5,'X':10,'L':50,'C':100,'D':500,'M':1000,'IV':4,'IX':9,'XL':40,'XC':90,'CD':400,'CM':900}
i = 0
res = 0
while i < len(s):
if i+1<len(s) and s[i:i+2] in roman:
res+=roman[s[i:i+2]]
i+=2
else:
res+=roman[s[i]]
i+=1
return res
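# Worked example of the round trip implemented above:
#   deciToRoman(1994) -> "MCMXCIV"  (1000 -> M, 900 -> CM, 90 -> XC, 4 -> IV)
#   romanToDeci("MCMXCIV") -> 1994  (two-character subtractive pairs are matched first)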
num = int(input("Enter number to translate : "))
print(translator().deciToRoman(num))
print(translator().romanToDeci(translator().deciToRoman(num))) | rootkidx/OODataStructure_Lab | python2/python2.1.py | python2.1.py | py | 978 | python | en | code | 0 | github-code | 13 |
12600901531 | import operator
import os
import sys
from pynput import keyboard
ListOne = []
ListTwo = ['*', '*']
def on_press(key):
try:
if key.char == '*':
ListOne.append('*')
else:
os.system(r"C:\Users\Lzhyrifx\AppData\Command\Error\SystemError.vbs")
sys.exit()
if operator.eq(ListOne, ListTwo):
os.system(r"C:\Users\Lzhyrifx\AppData\Command\Error\kill.vbs")
sys.exit()
except AttributeError:
os.system(r"C:\Users\Lzhyrifx\AppData\Command\Error\SystemError.vbs")
sys.exit()
with keyboard.Listener(
on_press=on_press
) as listener:
listener.join()
| Lzhyrifx/ApplyEncryption | Command/Python/Synchronization.py | Synchronization.py | py | 696 | python | en | code | 0 | github-code | 13 |
17316751482 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 20 09:12:57 2017
@author: Beau.Uriona
"""
from os import listdir
from os.path import isfile, join, dirname, abspath
import subprocess as sub
from multiprocessing.dummy import Pool
from datetime import datetime
from string import Template
dt = datetime
date = dt.date
today = dt.now()
this_dir = dirname(abspath(__file__))
master_dir = dirname(dirname(this_dir))
logFile = join(master_dir,'static','runLog.txt')
statusFile = join(master_dir,'static','prodStatus.html')
scriptPath = join(master_dir,'prodScripts')
class DeltaTemplate(Template):
delimiter = "%"
def run(runfile):
with open(runfile, "r") as rnf:
exec(rnf.read())
def writeToLog(strLog):
with open(logFile, 'r+') as loggerFile:
content = loggerFile.read()
loggerFile.seek(0, 0)
loggerFile.write(strLog + '\n\n' + content)
def writeToProdStatus(strLog):
with open(statusFile, 'w') as loggerFile:
loggerFile.write(strLog)
def strfdelta(tdelta, fmt):
d = {"D": tdelta.days}
hours, rem = divmod(tdelta.seconds, 3600)
minutes, seconds = divmod(rem, 60)
d["H"] = '{:02d}'.format(hours)
d["M"] = '{:02d}'.format(minutes)
d["S"] = '{:02d}'.format(seconds)
t = DeltaTemplate(fmt)
return t.substitute(**d)
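# Example: strfdelta(timedelta(hours=3, minutes=7, seconds=9), '%H:%M:%S') -> '03:07:09'
# (%D would give whole days, unpadded; the template delimiter is '%' via DeltaTemplate)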
def stopProduction():
global keepRunning
keepRunning = False
def runProduction():
global keepRunning
keepRunning = True
cycles = 0
while keepRunning:
beginScript = datetime.now()
scriptNames = [f.replace(r'.py',r'') for f in listdir(scriptPath) if
isfile(join(scriptPath, f)) and f[-3: ] == r'.py']
runScripts = [[r'python', join(scriptPath, f)] for
f in listdir(scriptPath) if
isfile(join(scriptPath, f)) and f[-3: ] == r'.py']
print('Begin Scripts @ - ' + datetime.now().strftime('%H:%M:%S'))
print(r'Running scripts located in - ' + scriptPath)
exitCodes = []
pool = Pool(4) # four concurrent commands at a time
for i, exitCode in enumerate(pool.imap(sub.call, runScripts)):
exitCodes.extend([exitCode])
if exitCode == 0:
print("%s - SUCCESS!" %
(runScripts[i][1].replace(scriptPath,r'').replace('\\',r'')))
if exitCode != 0:
print("%s - FAILED: %d" %
(runScripts[i][1].replace(scriptPath,r'').replace('\\',r''),
exitCode))
if cycles == 0: errCodeSum = [0]*(len(exitCodes)+1)
endScript = datetime.now()
scriptRunTime = strfdelta(endScript - beginScript, '%H:%M:%S')
joinStr = '\n\t'
iterLog = joinStr.join(["%s - %s" % f for f in zip(scriptNames,exitCodes)])
logStr = (r'Script Start - ' + beginScript.strftime('%H:%M:%S') +
joinStr + iterLog + '\n' + '\nScript Time - ' + scriptRunTime)
writeToLog(logStr)
htmlStr = r'''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd"> <html lang="en"> <head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>iChart Production Status</title> </head> <body> <p>REPLACE</p>
</body> </html>'''
htmlStr = htmlStr.replace(r'REPLACE', logStr)
htmlStr = htmlStr.replace('\n', r'<br>')
htmlStr = htmlStr.replace('\t', r'<LI>')
writeToProdStatus(htmlStr)
currErrCodes = list(exitCodes)
for index, err in enumerate(currErrCodes):
if err != 0:
errCodeSum[index] = errCodeSum[index] + 1
else:
errCodeSum[index] = 0
if max(errCodeSum) > 0:
print('Completed with errors @ - ' +
datetime.now().strftime('%H:%M:%S'))
else:
print('Completed with NO errors @ - ' +
datetime.now().strftime('%H:%M:%S'))
print(r'!@#$' + logStr + r'!@#$')
if sum(errCodeSum) != 0 and cycles < 10:
keepRunning = True
else:
keepRunning = False
if cycles == 10:
writeToLog('****After 10 attempts the process was aborted****')
cycles += 1
if __name__ == "__main__":
runProduction() | Sillson/awPlot | static/controlGUI/runProd.py | runProd.py | py | 4,634 | python | en | code | 0 | github-code | 13 |
17254711928 | import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--ply', type=str, help="chemin vers la racine du dossier contenant les nuages de points")
parser.add_argument('--png', type=str, help="chemin vers la racine du dossier contenant les images 2D")
parser.add_argument('--intersect', type=bool, help="pour chaque dossier ply, ne garde que les png correspondant à un nuage de points", default=False)
parser.add_argument('--per_cat', type=int, help="nombre d'objet à garder de chaque catégorie (.ply ou dossier de .png)", default=None)
parser.add_argument('--per_obj', type=int, help="nombre de vue (.png) à garder pour chaque objet dossier", default=None)
parser.add_argument('--rm_other', type=bool, help="supprime les fichiers '*.points.ply', .ply2.txt renderings.txt rendering_metadata.txt", default=False)
params = parser.parse_args()
ply = params.ply
png = params.png
intersect = params.intersect
per_cat = params.per_cat
per_obj = params.per_obj
rm_other = params.rm_other
print("Cet outil supprime des images et des nuages de points pour ne garder que ceux souhaités")
print(vars(params))
# ply = "../AtlasNet/data/customShapeNet"
# #png = "../AtlasNet/data/ShapeNetRendering2"
# rm_other = 1
# #intersect = 1
# per_cat = 500
# per_obj = 10
if not ply and not png:
raise ValueError("pour faire quelque chose il faut au moins un dossier")
if ply and not os.path.isdir(ply) or png and not os.path.isdir(png):
raise FileNotFoundError()
if per_cat is not None and per_cat < 10 or per_obj is not None and per_obj < 1:
raise ValueError("On va supprimer beaucoup trop, modifiez ce fichier si vous le voulez vraiment")
if intersect and not (ply and png):
raise ValueError("pour faire l'intersection, il faut les deux dossiers")
print("dossiers trouvés :")
for path in [ply, png]:
if path:
print(path,":")
for category in os.listdir(path):
if os.path.isdir(os.path.join(path, category)):
print(category, end=' ')
print("\n")
print("continuer ? y/n")
if input() != "y":
exit()
if ply:
if intersect:
objets = {}
for category in os.listdir(ply):
path_category = os.path.join(ply, category, "ply")
if os.path.isdir(path_category):
list_objets = []
            # build the list of useful objects
            # remove the other files along the way if requested
for x in os.listdir(path_category):
if x[-5:] == "2.txt" or x == "*.points.ply":
if rm_other:
os.remove(os.path.join(path_category, x))
continue
list_objets.append(x)
            # drop the surplus objects
if per_cat and len(list_objets) > per_cat:
list_objets = sorted(list_objets)
for x in list_objets[per_cat:]:
os.remove(os.path.join(path_category, x))
list_objets = list_objets[:per_cat]
if intersect:
                # for each category, store the set of object file names
                # (with the .points.ply suffix stripped)
objets[category] = set([x[:-11] for x in list_objets])
if png:
def rm_dir_png(path):
for fichier in os.listdir(os.path.join(path, "rendering")):
os.remove(os.path.join(path, "rendering", fichier))
os.rmdir(os.path.join(path, "rendering"))
os.rmdir(os.path.join(path))
for category in os.listdir(png):
path_category = os.path.join(png, category)
if os.path.isdir(path_category):
list_objets = []
            # build the list of objects,
            # removing the png folders that do not correspond to a point cloud
for x in os.listdir(path_category):
if intersect and category in objets and x not in objets[category]:
rm_dir_png(os.path.join(path_category, x))
continue
list_objets.append(x)
            # drop the surplus objects
if per_cat and len(list_objets) > per_cat:
list_objets = sorted(list_objets)
for x in list_objets[per_cat:]:
rm_dir_png(os.path.join(path_category, x))
list_objets = list_objets[:per_cat]
            # drop the surplus views
            # and remove the unneeded files
if per_obj or rm_other:
for x in list_objets:
for file in os.listdir(os.path.join(path_category, x, "rendering")):
if file[-4:] == ".png":
if per_obj and int(file[:2]) >= per_obj:
os.remove(os.path.join(path_category, x, "rendering", file))
elif rm_other:
os.remove(os.path.join(path_category, x, "rendering", file))
| keyber/reconstruction3D | source/utils/simplify_database.py | simplify_database.py | py | 5,026 | python | fr | code | 3 | github-code | 13 |
3224357771 | #!/usr/bin/python
"""ansible module for packer init"""
__metaclass__ = type
from pathlib import Path
from ansible.module_utils.basic import AnsibleModule
from mschuchard.general.plugins.module_utils import packer
DOCUMENTATION = r'''
---
module: packer_init
short_description: Module to manage Packer template and config directory initialization.
version_added: "1.0.0"
description: Install all the missing plugins required in a Packer config. Note that Packer does not have a state. This is the first command that should be executed when working with a new or existing template. This command is always safe to run multiple times. Though subsequent runs may give errors, this command will never delete anything.
options:
config_dir:
description: Location of the directory containing the Packer config file.
required: false
default: cwd
type: str
upgrade:
description: Update installed plugins to the latest available version if there is a new higher one. Note that this still considers the version constraint of the config.
required: false
default: false
type: bool
requirements:
- packer >= 1.7.0
author: Matthew Schuchard (@mschuchard)
'''
EXAMPLES = r'''
# initialize directory in /path/to/packer_config_dir
- name: Initialize packer directory in /path/to/packer_config_dir
mschuchard.general.packer_init:
config_dir: /path/to/packer_config_dir
# initialize current directory and upgrade plugins
- name: Initialize current packer directory and upgrade plugins
mschuchard.general.packer_init:
upgrade: true
# initialize directory in /path/to/packer_config_dir and upgrade plugins
- name: Initialize packer directory in /path/to/packer_config_dir and upgrade plugins
mschuchard.general.packer_init:
config_dir: /path/to/packer_config_dir
upgrade: true
'''
RETURN = r'''
command:
description: The raw Packer command executed by Ansible.
type: str
returned: always
sample: 'packer init -machine-readable /home/packer'
'''
def main() -> None:
"""primary function for packer init module"""
# instanstiate ansible module
module = AnsibleModule(
argument_spec={
'config_dir': {'type': 'path', 'required': False, 'default': Path.cwd()},
'upgrade': {'type': 'bool', 'required': False, 'default': False}
},
supports_check_mode=True
)
# initialize
changed: bool = False
config_dir: Path = Path(module.params.get('config_dir'))
    # check the optional upgrade param
flags: list[str] = []
if module.params.get('upgrade'):
flags.append('upgrade')
# determine packer command
command: str = packer.cmd(action='init', flags=flags, target_dir=config_dir)
# exit early for check mode
if module.check_mode:
module.exit_json(changed=False, command=command)
# execute packer
return_code: int
stdout: str
stderr: str
return_code, stdout, stderr = module.run_command(command, cwd=config_dir)
# check idempotence
if 'Installed plugin' in stdout:
changed = True
# post-process
if return_code == 0:
module.exit_json(changed=changed, stdout=stdout, stderr=stderr, command=command)
else:
module.fail_json(
msg=stderr.rstrip(), return_code=return_code, cmd=command,
stdout=stdout, stdout_lines=stdout.splitlines(),
stderr=stderr, stderr_lines=stderr.splitlines())
if __name__ == '__main__':
main()
| mschuchard/ansible.general | plugins/modules/packer_init.py | packer_init.py | py | 3,528 | python | en | code | 0 | github-code | 13 |
14275197886 | # Adapted from tensorflow_CTC_example.
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops.rnn import bidirectional_rnn
import numpy as np
import prettytensor as pt
import json
import sys
import os
from model import CtcModel
with open("model_input.json", "r") as fp:
model_input = json.load(fp)
def target_list_to_sparse_tensor(targetList):
'''make tensorflow SparseTensor from list of targets, with each element
in the list being a list or array with the values of the target sequence
(e.g., the integer values of a character map for an ASR target string)
See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/ctc/ctc_loss_op_test.py
for example of SparseTensor format'''
indices = []
vals = []
for tI, target in enumerate(targetList):
for seqI, val in enumerate(target):
indices.append([tI, seqI])
vals.append(val)
shape = [len(targetList), np.asarray(indices).max(0)[1]+1]
return (np.array(indices), np.array(vals), np.array(shape))
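# Illustrative example (not part of the original file): target_list_to_sparse_tensor([[1, 2], [3]])
# returns indices [[0, 0], [0, 1], [1, 0]], vals [1, 2, 3] and shape [2, 2] -- the triple fed to tf.SparseTensor.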
def test_edit_distance():
graph = tf.Graph()
with graph.as_default():
truth = tf.sparse_placeholder(tf.int32)
hyp = tf.sparse_placeholder(tf.int32)
editDist = tf.edit_distance(hyp, truth, normalize=False)
with tf.Session(graph=graph) as session:
        truthTest = target_list_to_sparse_tensor([[0, 1, 2], [0, 1, 2, 3, 4]])
        hypTest = target_list_to_sparse_tensor([[3, 4, 5], [0, 1, 2, 2]])
feedDict = {truth: truthTest, hyp: hypTest}
dist = session.run([editDist], feed_dict=feedDict)
print(dist)
def data_lists_to_batches(inputList, targetList, batchSize):
'''Takes a list of input matrices and a list of target arrays and returns
a list of batches, with each batch being a 3-element tuple of inputs,
targets, and sequence lengths.
inputList: list of 2-d numpy arrays with dimensions nFeatures x timesteps
targetList: list of 1-d arrays or lists of ints
batchSize: int indicating number of inputs/targets per batch
returns: dataBatches: list of batch data tuples, where each batch tuple (inputs, targets, seqLengths) consists of
inputs = 3-d array w/ shape batchSize x nTimeSteps x nFeatures
targets = tuple required as input for SparseTensor
seqLengths = 1-d array with int number of timesteps for each sample in batch
maxSteps: maximum number of time steps across all samples'''
# import pdb; pdb.set_trace()
assert len(inputList) == len(targetList)
nFeatures = inputList[0].shape[0]
maxSteps = 0
for inp in inputList:
maxSteps = max(maxSteps, inp.shape[1])
randIxs = np.random.permutation(len(inputList))
start, end = (0, batchSize)
dataBatches = []
while end <= len(inputList):
batchSeqLengths = np.zeros(batchSize)
for batchI, origI in enumerate(randIxs[start:end]):
batchSeqLengths[batchI] = inputList[origI].shape[-1]
batchInputs = np.zeros((batchSize, maxSteps, nFeatures))
batchTargetList = []
for batchI, origI in enumerate(randIxs[start:end]):
padSecs = maxSteps - inputList[origI].shape[1]
assert padSecs == 0
batchInputs[batchI,:,:] = inputList[origI].T
#batchInputs[:,batchI,:] = np.pad(inputList[origI].T, ((0,padSecs),(0,0)),
# 'constant', constant_values=0)
batchTargetList.append(targetList[origI])
dataBatches.append((batchInputs, target_list_to_sparse_tensor(batchTargetList),
batchSeqLengths))
start += batchSize
end += batchSize
return (dataBatches, maxSteps)
def load_batched_data(model_input, batchSize):
'''returns 3-element tuple: batched data (list), max # of time steps (int), and
total number of samples (int)'''
CLASSES_DICT = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '.': 10, ',': 10}
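    # Classes 0-9 are the digits and class 10 covers both decimal separators ('.' and ',');
    # the remaining index is left for the CTC blank (nClasses = 12 below).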
inputs = []
targets = []
def add_sample(img_path, target_str):
if not target_str:
return
inputs.append(np.load(img_path))
targets.append(np.array([CLASSES_DICT[x] for x in target_str]))
for record in model_input:
# import cv2
# cv2.imwrite('/tmp/dd/' + record['income'] + ".png", np.load(record['first_bin']))
add_sample(record['first_bin'], record['income'])
add_sample(record['second_bin'], record['family_income'])
# import pdb; pdb.set_trace()
# return
return data_lists_to_batches(inputs, targets, batchSize) + (len(inputs), )
# INPUT_PATH = './sample_data/mfcc' #directory of MFCC nFeatures x nFrames 2-D array .npy files
# TARGET_PATH = './sample_data/char_y/' #directory of nCharacters 1-D array .npy files
####Learning Parameters
learningRate = 0.001
momentum = 0.9
nEpochs = 3000
batchSize = 4
####Network Parameters
nFeatures = 22 #22 is the height of the image
nHidden = 128
nClasses = 12 #10 digits plus dot plus the "blank" for CTC
####Load data
print('Loading data')
batchedData, maxTimeSteps, totalN = load_batched_data(model_input, batchSize)
####Define graph
print('Defining model')
ctc_model = CtcModel(maxTimeSteps, nFeatures, nClasses)
####Run session
with tf.Session(graph=ctc_model.graph) as session:
print('Initializing')
tf.initialize_all_variables().run()
saver = tf.train.Saver()
if os.path.isfile('./train/save'):
print("Restoring state...")
saver.restore(session, './train/save')
if False:
# print("Restoring state...")
# saver.restore(session, './train/save')
# import pdb; pdb.set_trace()
batchedDataPred, _, _ = load_batched_data([{
"income": ".",
"family_income": ".",
"first_bin": "/Users/tilarids/dev/decl/extract_img_data/0278d69820395cf130f098f79b46caa62023627a9a7362295e2c5489.pdf.1.png.first.bin.npy",
"second_bin": "/Users/tilarids/dev/decl/extract_img_data/0278d69820395cf130f098f79b46caa62023627a9a7362295e2c5489.pdf.1.png.second.bin.npy"
}] * batchSize, batchSize)
batchInputs, batchTargetSparse, batchSeqLengths = batchedDataPred[0]
pred = ctc_model.run_predictions(session, batchInputs, batchSeqLengths)
predDense = session.run(tf.sparse_to_dense(pred.indices, pred.shape, pred.values))
# print("Prediction:", pred.values)
print("Prediction:", predDense)
print("Expected prediction:", batchTargetSparse[1])
sys.exit(0)
summary_writer = tf.train.SummaryWriter('/tmp/tensorboard/run2', session.graph)
for epoch in range(nEpochs):
print('Epoch', epoch+1, '...')
batchErrors = np.zeros(len(batchedData))
batchRandIxs = np.random.permutation(len(batchedData)) #randomize batch order
for batch, batchOrigI in enumerate(batchRandIxs):
batchInputs, batchTargetSparse, batchSeqLengths = batchedData[batchOrigI]
_, l, er, lmt, pr, summary = ctc_model.run_train_step(session, batchInputs, batchSeqLengths, batchTargetSparse)
print(np.unique(lmt)) #print unique argmax values of first sample in batch; should be blank for a while, then spit out target values
if (batch % 1) == 0:
print('Minibatch', batch, '/', batchOrigI, 'loss:', l)
print('Minibatch', batch, '/', batchOrigI, 'error rate:', er)
batchErrors[batch] = er*len(batchSeqLengths)
summary_writer.add_summary(summary, epoch * len(batchedData) + batch)
epochErrorRate = batchErrors.sum() / totalN
print('Epoch', epoch+1, 'error rate:', epochErrorRate)
if 1 == epoch % 50:
print("Saving state...")
saver.save(session, './train/save')
| tilarids/declear | train_ctc.py | train_ctc.py | py | 8,026 | python | en | code | 2 | github-code | 13 |
22011576118 | from django.shortcuts import render
from django.db.models import Count

from .forms import ComparisonForm  # assumed app-local import path
from .models import Costs  # assumed app-local import path


def price_comparison(request):
"""
Compare the price of different supermarkets
on a particular item
"""
results = []
item = ''
if request.method == "POST":
form = ComparisonForm(request.POST)
if form.is_valid():
retailers = form.cleaned_data.get('retailer')
item = form.cleaned_data.get('itemname')
for retailer in retailers:
costs = Costs.objects.filter(
retailer=retailer,
itemname__icontains=item).values('unitcost', 'itemname').annotate(
frequency = Count('unitcost')).order_by('-frequency')
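                # keep the most frequently recorded unit cost for this item at this retailer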
temp = {
'unitcost': 0,
'frequency':0,
}
for cost in costs:
if cost['frequency'] > temp['frequency']:
temp['unitcost'] = cost['unitcost']
temp['frequency'] = cost['frequency']
results.append({'unitcost': temp['unitcost'], 'retailer': retailer})
#print results
else:
form = ComparisonForm()
return render(request, 'food/price_comparison.html', {'form': form, 'item': item, 'results': results }) | GithakaMbui/consumer-guide | food/pricecomparison_backup.py | pricecomparison_backup.py | py | 1,001 | python | en | code | 0 | github-code | 13 |
35710207522 | from kivy.config import ConfigParser
import xml.etree.ElementTree
import threading
from pathlib import Path
import os.path
from datetime import datetime
from osmap.index import Index
class Osmap:
konfig = ConfigParser()
index = Index()
def nacitajnastavenia(self):
konfig_cesta = Path('nastavenia/nastavenia.ini')
self.konfig_json_cesta = Path('nastavenia/nastavenia.json')
self.konfig_json_priecinky = Path('nastavenia/nastavenia_priecinkov.json')
if konfig_cesta.is_file() and self.konfig_json_cesta.is_file() and self.konfig_json_priecinky:
self.konfig.read(str(konfig_cesta))
direktoria = Path(self.konfig.get('Hlavne', 'priecinok'))
if not direktoria.exists():
self.konfig.set('Hlavne', 'priecinok', Path('').resolve())
self.konfig.write()
else:
raise FileNotFoundError
def stiahnizoznam(self, kolbek, koniec, stiahnut=False):
def makaj():
adresa = self.konfig.get('Hlavne', 'adresazoznamov')
self.kam = Path(self.konfig.get('Hlavne', 'priecinok')) / Path(self.konfig.get('Hlavne', 'menozoznamov'))
frekv_stahovania_zoznamu = self.konfig.get('Hlavne', 'frekvencia_stahovania_zoznamu')
if stiahnut or not self.kam.exists() or (self.dni_zoznamu() > int(frekv_stahovania_zoznamu) > 0):
print('Stiahol by som novy zoznam')
# urlretrieve(adresa, self.kam , kolbek)
koniec()
vlakno = threading.Thread(target=makaj)
vlakno.start()
def dni_zoznamu(self):
vek = datetime.now() - datetime.fromtimestamp(os.path.getmtime(str(self.kam)))
return vek.days
def spravstrom(self, koniec):
# def makaj():
self.strom = xml.etree.ElementTree.parse(self.kam)
self.koren = self.strom.getroot()
self.spravzoznamy()
self.index.spravstrom()
koniec()
# vlakno = threading.Thread(target=makaj)
# vlakno.start()
def spravzoznamy(self):
for dieta in self.koren:
self.index.pridajzaznam(dieta)
@property
def typy(self):
return list(self.index.zoznamtypov)
| martincivan/OsMap | osmap/osmap.py | osmap.py | py | 2,221 | python | sl | code | 0 | github-code | 13 |
20814999552 | import json
import plotly.graph_objects as go
with open('response.json') as f:
data = json.load(f)
x_values = []
y_values = []
# Loop through each text annotation
for annotation in data['responses'][0]['textAnnotations']:
vertices = annotation['boundingPoly']['vertices']
# Loop through each vertex
for vertex in vertices:
x_values.append(vertex['x'])
y_values.append((vertices[0]['y'] + vertices[2]['y']) / 2) # Taking average of y-coordinates of first and third vertex
fig = go.Figure()
# Add histogram data for x coordinates
fig.add_trace(go.Histogram(
x=x_values,
name='x coordinates',
xbins=dict(
start=min(x_values),
end=max(x_values),
size=1 # Bin size of 1 pixel
),
marker_color='#EB89B5',
opacity=0.75
))
# Add histogram data for y coordinates
fig.add_trace(go.Histogram(
x=y_values,
name='y coordinates',
xbins=dict(
start=min(y_values),
end=max(y_values),
size=1 # Bin size of 1 pixel
),
marker_color='#330C73',
opacity=0.75
))
# Overlay histograms
fig.update_layout(
barmode='overlay',
title_text='Histogram of x and y coordinates',
xaxis_title_text='Value',
yaxis_title_text='Count',
bargap=0.2,
bargroupgap=0.1
)
fig.show()
| sazzadi-r14/textblock-test | advhist.py | advhist.py | py | 1,288 | python | en | code | 0 | github-code | 13 |
9225059235 | """Game Logic for the Progression Brain Game."""
from random import randint
INTRO = 'What number is missing in the progression?'
def make_progression(step, starting_num, missing_spot, progression_length):
"""Method for making a progression string and its missed answer."""
current_spot = 0
current_num = starting_num
progression_str = ''
while current_spot < progression_length:
if current_spot == missing_spot:
progression_str += ' ..'
missing_num = str(current_num)
        else:
            progression_str += ' ' + str(current_num)
        # advance the running value on every step so the visible numbers form a valid progression
        current_num += step
current_spot += 1
return missing_num, str.strip(progression_str)
def get_answer_and_question():
"""Get the answer (missed num) and the question (progression string)."""
step = randint(1, 10)
start_num = randint(0, 30)
missed_spot = randint(0, 9)
progression_length = 10
return make_progression(step, start_num, missed_spot, progression_length)
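# Illustrative example: make_progression(3, 2, 4, 10) returns
# ('14', '2 5 8 11 .. 17 20 23 26 29') -- the hidden value and the question string.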
| alienflakes/python-project-lvl1 | brain_games/games/progression.py | progression.py | py | 1,010 | python | en | code | 0 | github-code | 13 |
39647350620 | import os
import torch
import pandas as pd
import numpy as np
from tqdm import tqdm
from torch.autograd import Variable
from torchvision import transforms
from .dataGenerator import nii_loader, get_patch
from ..helpers import utils
from tqdm import tqdm
def __get_whole_tumor__(data):
return (data > 0)*(data < 4)
def __get_tumor_core__(data):
return np.logical_or(data == 1, data == 3)
def __get_enhancing_tumor__(data):
return data == 3
def _get_dice_score_(prediction, ground_truth):
masks = (__get_whole_tumor__, __get_tumor_core__, __get_enhancing_tumor__)
p = np.uint8(prediction)
gt = np.uint8(ground_truth)
wt, tc, et = [2*np.sum(func(p)*func(gt)) / (np.sum(func(p)) + np.sum(func(gt))+1e-6) for func in masks]
return wt, tc, et
def GenerateCSV3D(model,
dataset_path,
logs_root,
iteration = 0,
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")):
"""
    Generates the feedback csv which will be used
    for hard mining and by the data generator
    (custom function).
"""
model.eval()
brainRegion = []; backgroundRegion = [];
ETRegion = []; TCRegion = []; WTRegion = []
ETDice = []; TCDice = []; WTDice = []
path = []; coordinate = [];
def _GenerateSegmentation_(spath, vol, seg, size = 64, nclasses = 5):
"""
output of 3D tiramisu model (tir3Dnet)
N = patch size during inference
"""
shape = vol['t1'].shape # to exclude batch_size
final_prediction = np.zeros((nclasses, shape[0], shape[1], shape[2]))
x_min, x_max, y_min, y_max, z_min, z_max = 0, shape[0], 0, shape[1], 0, shape[2]
x_min, x_max, y_min, y_max, z_min, z_max = x_min, min(shape[0] - size, x_max), y_min, min(shape[1] - size, y_max), z_min, min(shape[2] - size, z_max)
with torch.no_grad():
for x in tqdm(range(x_min, x_max, size//2)):
for y in range(y_min, y_max, size//2):
for z in range(z_min, z_max, size//2):
data, mask = get_patch(vol, seg, coordinate = (x, y, z), size = size)
data = Variable(torch.from_numpy(data).unsqueeze(0)).to(device).float()
pred = torch.nn.functional.softmax(model(data).detach().cpu())
pred = pred.data.numpy()
final_prediction[:, x:x + size, y:y + size, z:z + size] = pred[0]
# Logs update
pred = np.argmax(pred[0], axis=0)
wt, tc, et = _get_dice_score_(pred, mask)
coordinate.append((x, y, z))
path.append(spath)
backgroundRegion.append(np.mean(mask == 0))
WTRegion.append(np.mean(__get_whole_tumor__(mask)))
ETRegion.append(np.mean(__get_enhancing_tumor__(mask)))
TCRegion.append(np.mean(__get_tumor_core__(mask)))
brainRegion.append(np.mean(mask > 0))
ETDice.append(et); WTDice.append(wt); TCDice.append(tc)
final_prediction = utils.convert5class_logitsto_4class(final_prediction)
final_prediction = np.argmax(final_prediction, axis=0).reshape((shape[0], shape[1],shape[2]))
return final_prediction
if iteration == 0:
subjects = [sub for sub in os.listdir(dataset_path) if not os.path.isfile(os.path.join(dataset_path, sub))]
training_subjects = subjects[:int(.8*len(subjects))]
validation_subjects = subjects[int(.8*len(subjects)):]
data_splits = [training_subjects, validation_subjects]
else :
training_subjects = pd.read_csv(os.path.join(logs_root, 'csv/training.csv'))['path'].values
training_subjects = [sub.split('/')[-1] for sub in training_subjects]
data_splits = [np.unique(training_subjects)]
for i, subjects in enumerate(data_splits):
for subject in tqdm(subjects):
print(subject)
spath = {}
subject_path = os.path.join(dataset_path, subject)
spath['flair'] = os.path.join(subject_path, subject + '_flair.nii.gz')
spath['t1ce'] = os.path.join(subject_path, subject + '_t1ce.nii.gz')
spath['seg'] = os.path.join(subject_path, subject + '_seg.nii.gz')
spath['t1'] = os.path.join(subject_path, subject + '_t1.nii.gz')
spath['t2'] = os.path.join(subject_path, subject + '_t2.nii.gz')
spath['mask'] = os.path.join(subject_path, 'mask.nii.gz')
vol, seg, affine = nii_loader(spath)
predictions = _GenerateSegmentation_(subject_path, vol, seg, size = 64, nclasses = 5)
utils.save_volume(predictions, affine, os.path.join(subject_path, 'DeepBrainSeg_Prediction_iteration_{}'.format(iteration)))
dataFrame = pd.DataFrame()
dataFrame['path'] = path
dataFrame['ETRegion'] = ETRegion
dataFrame['TCRegion'] = TCRegion
dataFrame['WTRegion'] = WTRegion
dataFrame['brain'] = brainRegion
dataFrame['ETdice'] = ETDice
dataFrame['WTdice'] = WTDice
dataFrame['TCdice'] = TCDice
dataFrame['background'] = backgroundRegion
dataFrame['coordinate'] = coordinate
if iteration == 0: csv_root = os.path.join(logs_root, 'csv')
else: csv_root = os.path.join(logs_root, 'csv/iteration_{}'.format(iteration))
os.makedirs(csv_root, exist_ok=True)
if i == 0: save_path = os.path.join(csv_root, 'training.csv')
else: save_path = os.path.join(csv_root, 'validation.csv')
dataFrame.to_csv(save_path)
if iteration == 0:
return os.path.join(csv_root, 'training.csv'), os.path.join(csv_root, 'validation.csv')
else:
return save_path
if __name__ == '__main__':
T3Dnclasses = 5
from os.path import expanduser
home = expanduser("~")
ckpt_tir3D = os.path.join(home, '.DeepBrainSeg/BestModels/Tramisu_3D_FC57_best_acc.pth.tar')
from .models.modelTir3D import FCDenseNet57
Tir3Dnet = FCDenseNet57(T3Dnclasses)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
ckpt = torch.load(ckpt_tir3D, map_location=device)
Tir3Dnet.load_state_dict(ckpt['state_dict'])
print ("================================== TIRNET3D Loaded =================================")
Tir3Dnet = Tir3Dnet.to(device)
    GenerateCSV3D(Tir3Dnet, '../../sample_volume/brats', '../../Logs/')
| koriavinash1/DeepBrainSeg | DeepBrainSeg/tumor/feedBack.py | feedBack.py | py | 6,678 | python | en | code | 175 | github-code | 13 |
23154936493 | #!/usr/bin/env python
import rospy
from std_msgs.msg import String,Float32MultiArray
from geometry_msgs.msg import PoseStamped
from mavros_msgs.msg import RCOut
import roslib
import rospy
import tf
import argparse
class savePressureCVS():
def __init__(self):
rospy.init_node('savePressureCVS', anonymous=True)
rospy.Subscriber("/mavros/rc/out", RCOut, self.getRCOut)
rospy.Subscriber("/pressureBMP388_array", Float32MultiArray, self.getPressure)
rate = rospy.Rate(50)
self.file = open('pressure_test.csv','w')
self.file.write('Time(s), RCOut(x8), pressure(x5) \n')
sc = 0
sp = 0
rospy.sleep(2)
self.t0 = rospy.get_time()
while not rospy.is_shutdown():
hello_str = "saving to cvs running for %s" % (rospy.get_time()-self.t0)
rospy.loginfo(hello_str)
self.saveDataCVS()
rate.sleep()
def getRCOut(self,data):
self.rc = data.channels
#rospy.loginfo(str(self.rc))
def getPressure(self,data):
self.pressure = data.data
def saveDataCVS(self):
self.file.write("%5.5f, " % (rospy.get_time()-self.t0) )
self.file.write(", ".join(str(elem) for elem in self.rc) )
self.file.write(", ".join(str(elem) for elem in self.pressure) + "\n")
#self.file.write('%5.3f, 5.3f, 5.3f, 5.3f, 5.3f,5.3f,5.3f,5.3f,' % (self.rc))
#self.file.write('%5.3f \n' % (self.data))
def shutdown(self):
self.file.close()
rospy.loginfo("Stop savePressureCVS")
rospy.sleep(1)
if __name__ == '__main__':
try:
gotoop = savePressureCVS()
except rospy.ROSInterruptException:
pass
| PastorD/bintel | scripts/save_pressure_cvs.py | save_pressure_cvs.py | py | 1,794 | python | en | code | 3 | github-code | 13 |
43175667599 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django.conf.urls.defaults import *
import views
urlpatterns = patterns('',
url(r'^tags$', views.index),
url(r'^tags/add$', views.add_tag, name="add-tag"),
url(r'^tags/(?P<pk>\d+)$', views.edit_tag, name="view-tag"),
#url(r'^groups/$', views.index),
#url(r'^groups/add$', views.add_group),
#url(r'^groups/(?P<pk>\d+)$', views.edit_group),
)
| oluka/mapping_rapidsms | apps/tags/urls.py | urls.py | py | 469 | python | en | code | 3 | github-code | 13 |
1596999338 | import os
import sys
import pandas as pd
import sqlite3
import psycopg2
sys.path.append(os.getcwd()+'\\src')
import db_interface
def main():
conn_sqlite3 = sqlite3.connect("temp/data.db")
query = "SELECT * FROM data"
df = pd.read_sql_query(query, conn_sqlite3)
df = df.iloc[:,1:]
print(df.head())
first_row = df.iloc[0, :]
list_of_values = first_row.tolist()
my_db = db_interface.DB_interface("FINANCE_DB")
with my_db:
my_db.replace_table_slow("financials", df)
my_db.save_db()
if __name__ == "__main__":
main() | maj-oliveira/quant-finance-strategy | temp/insert_into_db.py | insert_into_db.py | py | 582 | python | en | code | 0 | github-code | 13 |
35180707730 | import ctypes
from random import randint, random
import games.utils.utils as utils
class Matrix(ctypes.Structure):
"""
    The Matrix class mirrors the C structure of the same name.
    Fields:
    - rows - number of rows in the matrix
    - columns - number of columns in the matrix
    - matrix - pointer to the start of the matrix.
"""
_fields_ = [("rows", ctypes.c_int),
("columns", ctypes.c_int),
("matrix", ctypes.POINTER(ctypes.POINTER(ctypes.c_int)))]
def __init__(self, rows, columns):
"""
        Constructor for the Matrix class.
"""
self.rows = rows
self.columns = columns
self.matrix = init_matrix(rows, columns)
def fill_random_cell(game_field, number, rows, columns):
"""
    Place the given number in a random empty cell of the game field.
"""
i = randint(0, rows - 1)
j = randint(0, columns - 1)
while game_field[i][j] != 0:
i = randint(0, rows - 1)
j = randint(0, columns - 1)
game_field[i][j] = number
def get_random_numb():
"""
    Pick the digit 2 (probability 0.9) or 4 (probability 0.1)
    to be spawned on the game field.
"""
return 2 if random() > 0.1 else 4
def init_matrix(rows, columns):
"""
    Allocate the matrix and fill it with zeros.
"""
c_int_p = ctypes.POINTER(ctypes.c_int)
value_array = ctypes.c_int * columns
pointer_array = c_int_p * rows
matrix_pointer = pointer_array()
for i in range(rows):
matrix_pointer[i] = value_array()
for j in range(columns):
matrix_pointer[i][j] = 0
return matrix_pointer
def check_end_game(game_field):
"""
    Return True if no further move is possible on the field (game over).
"""
for i in range(game_field.rows - 1):
for j in range(game_field.rows - 1):
if game_field.matrix[i][j] == game_field.matrix[i + 1][j] or \
game_field.matrix[i][j + 1] == game_field.matrix[i][j]:
return False
for i in range(game_field.rows):
for j in range(game_field.rows):
if game_field.matrix[i][j] == 0:
return False
for i in range(game_field.rows - 1):
if game_field.matrix[game_field.rows - 1][i] == \
game_field.matrix[game_field.rows - 1][i + 1]:
return False
for i in range(game_field.rows - 1):
if game_field.matrix[i][game_field.rows - 1] == \
game_field.matrix[i + 1][game_field.rows - 1]:
return False
return True
def reverse_field(game_field):
"""
    Reverse every row of the matrix.
"""
for i in range(game_field.rows):
for j in range(game_field.columns // 2):
temp = game_field.matrix[i][j]
game_field.matrix[i][j] = game_field.matrix[i][game_field.columns - j - 1]
game_field.matrix[i][game_field.columns - j - 1] = temp
return game_field
def transpose_field(game_field):
"""
    Transpose the matrix.
"""
for i in range(game_field.rows):
for j in range(game_field.columns - i):
temp = game_field.matrix[i][j + i]
game_field.matrix[i][j + i] = game_field.matrix[j + i][i]
game_field.matrix[j + i][i] = temp
return game_field
def shift_field(game_field):
"""
    Shift the non-zero cells of the game field to the left.
"""
new_matrix = Matrix(game_field.rows, game_field.columns)
for i in range(game_field.rows):
nonzero_elements = 0
for j in range(game_field.columns):
if game_field.matrix[i][j] != 0:
new_matrix.matrix[i][nonzero_elements] = game_field.matrix[i][j]
nonzero_elements += 1
shift_is_done = is_fields_identical(game_field, new_matrix)
return new_matrix, not shift_is_done
def merge_field_cells(game_field):
"""
    Merge adjacent equal cells of the game field.
"""
merge_is_done = False
for i in range(game_field.rows):
for j in range(game_field.columns - 1):
if game_field.matrix[i][j] == game_field.matrix[i][j + 1] \
and game_field.matrix[i][j] != 0:
game_field.matrix[i][j] *= 2
game_field.matrix[i][j + 1] = 0
merge_is_done = True
return game_field, merge_is_done
def update_field(game_field, field_location):
"""
    Update the matrix according to the move made by the player.
    The matrix is first transformed (by transposing and/or reversing rows) so that
    any move can be processed as a move to the left.
    After the update the matrix is transformed back to its original orientation.
"""
if field_location is not None:
game_field = field_location(game_field)
game_field, shift_is_done = shift_field(game_field)
game_field, merge_is_done = merge_field_cells(game_field)
game_field, _ = shift_field(game_field)
if field_location is not None:
game_field = field_location(game_field)
return game_field, shift_is_done or merge_is_done
def make_move(move, game_field):
"""
    Perform a move to the left, right, up or down depending on the value
    passed by the player ('l', 'r', 'u', 'd' respectively).
    If the value is invalid, the function returns the original matrix unchanged.
"""
is_done = False
if move == 'l':
game_field, is_done = update_field(game_field, None)
elif move == 'r':
game_field, is_done = update_field(game_field, reverse_field)
elif move == 'u':
game_field, is_done = update_field(game_field, transpose_field)
elif move == 'd':
game_field, is_done = update_field(
game_field, lambda x: reverse_field(transpose_field(x)))
return game_field, is_done
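# Summary of the move handling above: 'l' uses the field as-is, 'r' reverses each row first,
# 'u' transposes first, and 'd' transposes and then reverses, so every move reduces to a left shift/merge.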
def is_fields_identical(game_field, matrix_field_copy):
"""
    Check whether the two matrices are equal.
"""
for i in range(game_field.rows):
for j in range(game_field.columns):
if game_field.matrix[i][j] != matrix_field_copy.matrix[i][j]:
return False
return True
def copy_field(game_field, matrix_field_copy):
"""
    Copy the cells of the original game field into the backup field, so the matrix
    can be checked for corruption after the player's function has been called.
"""
for i in range(game_field.rows):
for j in range(game_field.columns):
matrix_field_copy.matrix[i][j] = game_field.matrix[i][j]
return matrix_field_copy
def scoring(game_field):
"""
    Compute the player's final score (the sum of all cells).
"""
score = 0
for i in range(game_field.rows):
for j in range(game_field.columns):
score += game_field.matrix[i][j]
return score
def print_field(game_field, player_name, score, field_size):
"""
    Print the final state of the game field and the score obtained.
"""
print(f"PLAYER: {player_name} SCORE: {score}")
for i in range(field_size):
for j in range(field_size):
print(game_field.matrix[i][j], end="\t")
print("")
def ctypes_wrapper(player_lib, move, game_field):
"""
    Wrapper used to catch segmentation faults raised by the player's library.
"""
move.value = player_lib.teen48game(game_field).decode('utf-8')
def start_teen48game_competition(players_info, field_size):
"""
    Create the game field and run the game for every player, counting
    their score. If the new score is lower than the previously recorded
    one, the stored score is not updated.
"""
if field_size == 4:
utils.redirect_ctypes_stdout()
results = []
for player in players_info:
if player[0] == "NULL":
results.append(utils.GameResult.no_result)
continue
player_lib = ctypes.CDLL(player[0])
player_lib.teen48game.argtypes = [Matrix]
player_lib.teen48game.restype = ctypes.c_char
game_field = Matrix(field_size, field_size)
game_field_copy = Matrix(field_size, field_size)
fill_random_cell(game_field.matrix, get_random_numb(),
game_field.rows, game_field.columns)
fill_random_cell(game_field.matrix, get_random_numb(),
game_field.rows, game_field.columns)
game_is_end = False
prev_move = "_"
while not game_is_end:
copy_field(game_field, game_field_copy)
move = utils.call_libary(
player_lib, ctypes_wrapper, ctypes.c_wchar, utils.Error.char_segfault, game_field
)
game_field, is_done = make_move(move, game_field)
if is_done:
rand_numb = get_random_numb()
fill_random_cell(game_field.matrix, rand_numb,
game_field.rows, game_field.columns)
game_is_end = check_end_game(game_field)
if move == utils.Error.char_segfault:
print("▼ This player caused segmentation fault. ▼")
game_is_end = True
if prev_move == move and not is_done:
print(
f"Two identical moves that do not change the field. Move: {move}")
game_is_end = True
prev_move = move
score = scoring(game_field)
results.append(score if score > player[1] else player[1])
print_field(game_field, utils.parsing_name(
player[0]), score, field_size)
return results
if __name__ == "__main__":
start_teen48game_competition(
[("games/teen48/teen48lib.so", 0), ("NULL", 1000)], 4)
| iu7og/iu7games | games/teen48/teen48_runner.py | teen48_runner.py | py | 10,951 | python | ru | code | 3 | github-code | 13 |
1069966553 | trace0 = go.Scatter(
x = df.columns,
y = df.loc['Netherlands'],
mode = 'lines',
name = 'Netherlands',
line = dict(
color = 'rgb(255, 127, 0)'
)
)
trace1 = go.Scatter(
x = df.columns,
y = df.loc['France'],
mode = 'lines+markers',
name = 'France',
line = dict(
color = 'rgb(0, 0, 255)'
)
)
fig = go.Figure(data = [trace0, trace1])
fig.update_layout(
title = 'GDP per-capita for the Netherlands and France',
xaxis_title = 'Year',
yaxis_title = 'GDP per-capita'
)
fig.show()
| ualberta-rcg/python-plotting | notebooks/solutions/plotly-scatter-netherlands-france.py | plotly-scatter-netherlands-france.py | py | 548 | python | en | code | 5 | github-code | 13 |
32547106375 | # Converts the AVA style_image_lists into the standard directory layout expected by torchvision.datasets.ImageFolder.
import os
import shutil
dirpath = '../data/ava_dataset/'
imagepath = dirpath + 'images/'
style_dir_path = dirpath + 'style_image_lists/'
train_id = style_dir_path + 'train.jpgl'
train_tag = style_dir_path + 'train.lab'
test_id = style_dir_path + 'test.jpgl'
test_tags = style_dir_path + 'test.multilab'
out_dir = '../data/ava_style/'
if not os.path.exists(out_dir):
os.mkdir(out_dir)
out_train_dir = out_dir + 'train/'
out_test_dir = out_dir + 'test/'
if not os.path.exists(out_dir+'train/'):
os.mkdir(out_dir+'train/')
if not os.path.exists(out_dir+'test/'):
os.mkdir(out_dir+'test/')
fp_id_name = open(style_dir_path + 'styles.txt')
tag2name = dict()
for line in fp_id_name.readlines():
fields = line.strip().split(' ')
tag2name[fields[0]] = fields[1] # str 2 str
if not os.path.exists(out_dir+'train/'+fields[1]):
os.mkdir(out_dir+'train/' + fields[1])
if not os.path.exists(out_dir+'test/'+fields[1]):
os.mkdir(out_dir+'test/' + fields[1])
# copy the images to the corresponding directory : train set
trainids = open(train_id).readlines()
traintags = open(train_tag).readlines()
for iid, tag in zip(trainids, traintags):
iid, tag = iid.strip(), tag.strip()
tagname = tag2name[tag]
shutil.copyfile(imagepath + iid + '.jpg', out_train_dir + tagname + '/' + iid + '.jpg')
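# Resulting layout (training split): ../data/ava_style/train/<style_name>/<image_id>.jpg,
# which torchvision.datasets.ImageFolder can read directly.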
| 2742195759/xkcv_backbone | tools/change_ava_format_to_imagefolder.py | change_ava_format_to_imagefolder.py | py | 1,474 | python | en | code | 0 | github-code | 13 |
29078749494 | from ckeditor_uploader.widgets import CKEditorUploadingWidget
from django import forms
from django.contrib import admin
from django.utils.safestring import mark_safe
from photo.admin import wrapper_photo
from server.utils.handler import ExceptionHandler
from .models import Cake
class CakeAdminForm(forms.ModelForm):
text = forms.CharField(widget=CKEditorUploadingWidget(), label='Описание')
class Meta:
model = Cake
fields = '__all__'
class CakeAdmin(admin.ModelAdmin):
form = CakeAdminForm
list_display = ('name', 'text', 'price',
'created', 'created_by', 'updated', 'updated_by', 'sample_photos')
list_display_links = ('name', 'text')
list_editable = ('price',)
readonly_fields = ['sample_photos', 'created', 'created_by', 'updated', 'updated_by']
search_fields = ('name', 'text')
save_on_top = True
def sample_photos(self, obj_cake):
"""Показываем картинку. https://books.agiliq.com/projects/django-admin-cookbook/en/latest/imagefield.html"""
photos_all = obj_cake.photos.all()
html = ''
for obj in photos_all:
try:
width = obj.url.width
height = obj.url.height
url = obj.url.url
except Exception as err:
err_h = ExceptionHandler(err)
html += '<p>%s <strong>Ошибка: %s</strong></p>' % (obj.url.name, err_h)
continue
html += wrapper_photo(80, 80, width, height, url)
return mark_safe(html)
sample_photos.short_description = "Фото"
admin.site.register(Cake, CakeAdmin)
| AbbasIsaev/DjangoCakes | cake/admin.py | admin.py | py | 1,670 | python | en | code | 0 | github-code | 13 |
35266828276 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
iris_df = load_iris()
# print(iris_df.head())
data = load_iris()
# print(data.feature_names)
# print(data.target_names)
# print(data.target)
X = data.data
# print(X.shape )
y = data.target
# print(y.shape)
y = y.reshape(-1, 1)
# print(y.shape)
print(plt.figure(figsize=(18,8),dpi=100) )
model = LinearRegression()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model.fit(X_train, y_train)
ypred = model.predict(X_test)
# print("Age: ", X_test.values[0])
print("Accuracy: ", model.score(X_test, y_test))
plt.scatter(X.T[0],X.T[2])
plt.title('IRIS Petal and sepal length', fontsize=20)
plt.ylabel('Petal Length')
plt.xlabel('sepal length')
plt.show() | ayuSh0614/Oohooo-DSA-cpp | rough.py | rough.py | py | 900 | python | en | code | 0 | github-code | 13 |
70145146898 | """Kernels used to calculate equivalent width of spectral lines"""
from jax import jit as jjit
from jax import numpy as jnp
@jjit
def _get_quadfit_weights(x, x1, x2, x3, x4):
msk_lo = (x >= x1) & (x <= x2)
msk_hi = (x >= x3) & (x <= x4)
msk = msk_lo | msk_hi
return jnp.where(msk, 1, 0)
@jjit
def _get_integration_weights(x, x2, x3):
msk = (x >= x2) & (x <= x3)
return jnp.where(msk, 1, 0)
@jjit
def _weighted_quadratic_fit(x, y, w):
deg = 2
lhs = jnp.vander(x, deg + 1)
rhs = y
lhs *= w[:, jnp.newaxis]
rhs *= w
# scale lhs to improve condition number and solve
scale = jnp.sqrt((lhs * lhs).sum(axis=0))
lhs /= scale[jnp.newaxis, :]
c, resids, rank, s = jnp.linalg.lstsq(lhs, rhs)
c = (c.T / scale).T # broadcast scale coefficients
return c
@jjit
def _ew_kernel(
wave,
flux,
line_lo,
line_mid,
line_hi,
cont_lo_lo,
cont_lo_hi,
cont_hi_lo,
cont_hi_hi,
):
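    # The continuum is fit with a quadratic over the two side windows
    # [cont_lo_lo, cont_lo_hi] and [cont_hi_lo, cont_hi_hi]; the equivalent width is the
    # integrated (spectrum - continuum) flux over [line_lo, line_hi] divided by the
    # fitted continuum level at line_mid.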
quadfit_w = _get_quadfit_weights(
wave, cont_lo_lo, cont_lo_hi, cont_hi_lo, cont_hi_hi
)
c = _weighted_quadratic_fit(wave, flux, quadfit_w)
c2, c1, c0 = c
continuum_strength_at_line = c0 + c1 * line_mid + c2 * line_mid**2
int_w = _get_integration_weights(wave, line_lo, line_hi)
continuum_integrand = int_w * (c0 + c1 * wave + c2 * wave * wave)
spec_integrand = int_w * flux
continuum_flux_integral = jnp.trapz(continuum_integrand, x=wave)
spec_flux_integral = jnp.trapz(spec_integrand, x=wave)
total_line_flux = spec_flux_integral - continuum_flux_integral
equivalent_width = total_line_flux / continuum_strength_at_line
return equivalent_width, total_line_flux
| ArgonneCPAC/dsps | dsps/em_lines/equivalent_width.py | equivalent_width.py | py | 1,699 | python | en | code | 21 | github-code | 13 |
43254519561 | import sys
sys.stdin = open('input.txt')
n = int(input())
num_list = list(map(int, input().split()))
memo = [[0 for _ in range(21)] for _ in range(n+1)]
# memo[i][j]: number of ways the running total equals j (0..20) after using the first i numbers
memo[1][num_list[0]] = 1  # the first number is fixed, so seed its slot with 1
for i in range(1, n):
for j in range(21):
if memo[i][j] > 0:
if 0 <= j - num_list[i] <= 20:
memo[i+1][j-num_list[i]] += memo[i][j]
if 0 <= j + num_list[i] <= 20:
memo[i+1][j+num_list[i]] += memo[i][j]
print(memo[n-1][num_list[-1]])
""" 시간초과
dp = [[num_list[0]]]
for i in range(1, n-1):
temp = []
for j in range(len(dp[i-1])):
plus_value = dp[i-1][j] + num_list[i]
minus_value = dp[i-1][j] - num_list[i]
if 0 <= plus_value <= 20:
temp.append(plus_value)
if 0 <= minus_value <= 20:
temp.append(minus_value)
dp.append(temp)
print(dp[-1])
print(dp[-1].count(num_list[-1]))
""" | KimSoomae/Algoshipda | week3(dp)/골드5/김성현_5557_1학년.py | 김성현_5557_1학년.py | py | 1,041 | python | en | code | 0 | github-code | 13 |
19563538923 | from nose.plugins.attrib import attr
import unittest2 as unittest
from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
from tempest.tests.compute.base import BaseComputeTest
class ConsoleOutputTest(BaseComputeTest):
@classmethod
def setUpClass(cls):
super(ConsoleOutputTest, cls).setUpClass()
cls.client = cls.console_outputs_client
cls.servers_client = cls.servers_client
cls.name = rand_name('server')
resp, server = cls.servers_client.create_server(cls.name,
cls.image_ref,
cls.flavor_ref)
cls.server_id = server['id']
cls.servers_client.wait_for_server_status(cls.server_id, 'ACTIVE')
@classmethod
def tearDownClass(cls):
cls.servers_client.delete_server(cls.server_id)
super(ConsoleOutputTest, cls).tearDownClass()
@attr(type='positive')
def test_get_console_output(self):
"""
Positive test:Should be able to GET the console output
for a given server_id and number of lines
"""
def get_output():
resp, output = self.client.get_console_output(self.server_id, 10)
self.assertEqual(200, resp.status)
self.assertNotEqual(output, None)
lines = len(output.split('\n'))
self.assertEqual(lines, 10)
self.wait_for(get_output)
@attr(type='negative')
def test_get_console_output_invalid_server_id(self):
"""
Negative test: Should not be able to get the console output
for an invalid server_id
"""
try:
resp, output = self.client.get_console_output('!@#$%^&*()', 10)
except exceptions.NotFound:
pass
@attr(type='positive')
@unittest.skip('Until tempest bug 1014683 is fixed.')
def test_get_console_output_server_id_in_reboot_status(self):
"""
Positive test:Should be able to GET the console output
for a given server_id in reboot status
"""
try:
resp, output = self.servers_client.reboot(self.server_id, 'SOFT')
self.servers_client.wait_for_server_status(self.server_id,
'REBOOT')
resp, server = self.servers_client.get_server(self.server_id)
if (server['status'] == 'REBOOT'):
resp, output = self.client.get_console_output(self.server_id,
10)
self.assertEqual(200, resp.status)
self.assertNotEqual(output, None)
lines = len(output.split('\n'))
self.assertEqual(lines, 10)
else:
self.fail("Could not capture instance in Reboot status")
finally:
self.servers_client.wait_for_server_status(self.server_id,
'ACTIVE')
| aristanetworks/arista-ovs-testing | tempest/tempest/tests/compute/test_console_output.py | test_console_output.py | py | 3,044 | python | en | code | 0 | github-code | 13 |
6820589185 | """
Title: Plotter
Description: For plotting data
Author: Janzen Choi
"""
# Libraries
import matplotlib.pyplot as plt
import matplotlib.colors as mcolours
from moga_neml.helper.experiment import DATA_UNITS
# Constants
DEFAULT_PATH = "./plot"
EXP_TRAIN_COLOUR = "silver"
EXP_VALID_COLOUR = "gray"
PRD_DATA_COLOUR = "r"
ALL_COLOURS = list(mcolours.TABLEAU_COLORS) + list(mcolours.BASE_COLORS) + list(mcolours.CSS4_COLORS)
# Plotter class
class Plotter:
def __init__(self, path:str=DEFAULT_PATH, x_label:str="x", y_label:str="y"):
"""
Class for plotting data
Parameters:
* `path`: The path to save the plot
* `x_label`: The label for the x axis
* `y_label`: The label for the y axis
"""
self.path = path
self.x_label = x_label
self.y_label = y_label
def prep_plot(self, title:str="", size:int=12) -> None:
"""
Prepares the plot
Parameters:
* `title`: The title of the plot
* `size`: The size of the font
"""
# Set figure size and title
plt.figure(figsize=(5,5))
plt.title(title, fontsize=size+3, fontweight="bold", y=1.05)
plt.gca().set_position([0.17, 0.12, 0.75, 0.75])
plt.gca().grid(which="major", axis="both", color="SlateGray", linewidth=1, linestyle=":")
# Set x and y labels
x_units = f" ({DATA_UNITS[self.x_label]})" if self.x_label in DATA_UNITS.keys() else ""
y_units = f" ({DATA_UNITS[self.y_label]})" if self.y_label in DATA_UNITS.keys() else ""
plt.xlabel(f"{self.x_label.capitalize()}{x_units}", fontsize=size)
plt.ylabel(f"{self.y_label.capitalize()}{y_units}", fontsize=size)
def set_limits(self, x_limits:tuple=None, y_limits:tuple=None) -> None:
"""
Sets the limits of the x and y scales
Parameters:
* `x_limits`: The upper and lower bounds of the plot for the x scale
        * `y_limits`: The upper and lower bounds of the plot for the y scale
"""
if x_limits != None:
plt.xlim(*x_limits)
if y_limits != None:
plt.ylim(*y_limits)
def set_log_scale(self, x_log:bool=False, y_log:bool=False) -> None:
"""
Changes the scale of the plot
Parameters:
* `x_log`: Whether to log the x scale
* `y_log`: Whether to log the y scale
"""
if x_log:
plt.xscale("log")
if y_log:
plt.yscale("log")
def scat_plot(self, data_dict:dict, colour:str=EXP_TRAIN_COLOUR, size:int=5, priority:int=1) -> None:
"""
Plots the experimental data using a scatter plot
Parameters:
* `data_dict`: The dictionary to store the data
* `colour`: The colour to plot the data
* `size`: The size of the curve
* `priority`: The priority of the curve
"""
x_list = data_dict[self.x_label]
if self.x_label == "time":
x_list = [x/3600 for x in x_list]
plt.scatter(x_list, data_dict[self.y_label], s=size**2,
marker="o", color=colour, linewidth=1, zorder=priority)
def line_plot(self, data_dict:dict, colour=PRD_DATA_COLOUR, priority:int=1) -> None:
"""
Plots the experimental data using a line plot
Parameters:
* `data_dict`: The dictionary to store the data
* `colour`: The colour to plot the data
* `priority`: The priority of the curve
"""
x_list = data_dict[self.x_label]
if self.x_label == "time":
x_list = [x/3600 for x in x_list]
plt.plot(x_list, data_dict[self.y_label], colour, zorder=priority)
def define_legend(self, colour_list:list, label_list:list, size_list:list, type_list:list) -> None:
"""
Defines the plot legend
Parameters:
* `colour_list`: The colours in the legend
* `label_list`: The keys to add to the legend
* `size_list`: The size of the icons in the legend
* `type_list`: The type of the icons in the legend
"""
for i in range(len(colour_list)):
if type_list[i] == "scatter":
plt.scatter([0], [0], color=colour_list[i], label=label_list[i], s=size_list[i]**2)
elif type_list[i] == "line":
plt.plot([0], [0], color=colour_list[i], label=label_list[i], linewidth=size_list[i])
plt.legend(framealpha=1, edgecolor="black", fancybox=True, facecolor="white")
def save_plot(self) -> None:
"""
Saves the plot
"""
plt.savefig(self.path)
def clear(self) -> None:
"""
Clears the plot
"""
plt.clf()
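# Minimal usage sketch (illustrative only; the field names below are assumptions):
#   plotter = Plotter(path="./plot", x_label="time", y_label="strain")
#   plotter.prep_plot(title="Creep curve")
#   plotter.scat_plot(exp_data)   # exp_data: dict mapping "time"/"strain" to value lists
#   plotter.line_plot(prd_data)
#   plotter.save_plot()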
| ACME-MG/moga_neml | moga_neml/interface/plotter.py | plotter.py | py | 4,835 | python | en | code | 0 | github-code | 13 |
10191525025 | def solution(jobs):
n = len(jobs)
answer = 0
    # sort jobs by processing time
jobs = sorted(jobs, key=lambda x: x[1])
start = 0
while jobs:
for i in range(len(jobs)):
if jobs[i][0] <= start:
start += jobs[i][1]
answer += start - jobs[i][0]
                # jobs is sorted by processing time, so pop the first feasible one
jobs.pop(i)
break
if i == len(jobs) - 1:
start += 1
return answer // n
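# Worked example (illustrative): solution([[0, 3], [1, 9], [2, 6]]) == 9
# (total waiting time 3 + 7 + 17 = 27 over three jobs, and 27 // 3 = 9).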
# Alternative solution using a heap
import heapq
def solution(jobs):
answer = 0
start, now = -1, 0
heap = []
i = 0
while i < len(jobs):
for j in jobs:
            # push the jobs that arrived in the current time window onto the heap
if start < j[0] <= now:
heapq.heappush(heap, [j[1], j[0]])
        # pop the waiting job with the shortest processing time
if len(heap) > 0:
tmp = heapq.heappop(heap)
start = now
now += tmp[0]
answer += now - tmp[1]
i += 1
else:
now += 1
return answer // len(jobs)
| Jinnie-J/Algorithm-study | programmers/[힙]디스크컨트롤러.py | [힙]디스크컨트롤러.py | py | 1,187 | python | ko | code | 0 | github-code | 13 |
25319990979 | import os
import operator
from functools import reduce
CURRENT_DIRECTORY = os.path.dirname(__file__)
os.chdir(CURRENT_DIRECTORY)
def read_input_lines():
with open('input.txt', 'r') as fh:
return [x.strip() for x in fh.readlines()]
def read_input_text():
with open('input.txt', 'r') as fh:
return fh.read().strip()
def part_a():
grouptexts = read_input_text().split("\n\n")
groupconcats = [x.replace("\n","") for x in grouptexts]
print(sum([len(set(x)) for x in groupconcats]))
def part_b():
grouptexts = read_input_text().split("\n\n")
linesAsSets = [[set(y) for y in x.split()] for x in grouptexts]
print(sum([len(reduce(operator.__and__, x )) for x in linesAsSets]))
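# Example (illustrative): a group answering "ab" and "ac" contributes len({'a','b','c'}) = 3
# to part_a (union) and len({'a'}) = 1 to part_b (intersection).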
part_a()
part_b()
| voidlessVoid/advent_of_code_2020 | day_06/michael/solution.py | solution.py | py | 742 | python | en | code | 0 | github-code | 13 |
23060606070 | import random
import math
import numpy as np
from preset import Preset
from parameter import Parameter
from utils import clip
class RadialGradient(Preset):
"""Radial gradient that responds to onsets"""
speed = Parameter('speed', 0.1)
hue_width = Parameter('hue-width', 0.2)
hue_step = Parameter('hue-step', 0.1)
wave1_amplitude = Parameter('wave1-amplitude', 0.4)
wave1_period = Parameter('wave1-period', 2.5)
wave1_speed = Parameter('wave1-speed', 0.02)
wave2_amplitude = Parameter('wave2-amplitude', 0.3)
wave2_period = Parameter('wave2-period', 3.0)
wave2_speed = Parameter('wave2-speed', -1.0)
#blackout = Parameter('blackout', 0.05)
blackout = Parameter('blackout', 0.6)
#whiteout = Parameter('whiteout', 0.1)
whiteout = Parameter('whiteout', 0)
luminance_speed = Parameter('luminance-speed', 0.25)
luminance_scale = Parameter('luminance-scale', 1.5)
luminance_steps = Parameter('luminance-steps', 256)
def editable_parameters(self):
return [
self.speed,
self.hue_width,
self.hue_step,
self.wave1_amplitude,
self.wave1_speed,
self.wave1_period,
self.wave2_amplitude,
self.wave2_period,
self.wave2_speed,
self.blackout,
self.whiteout,
self.luminance_speed,
self.luminance_scale
]
def on_load(self):
#cx, cy = self.scene().center_point()
cx, cy = self.center_point()
self.locations = self.get_locations_buffer()
x, y = self.locations.T
x -= cx
y -= cy
self.pixel_distances = np.sqrt(np.square(x) + np.square(y))
self.pixel_angles = math.pi + np.arctan2(y, x)
self.pixel_distances /= max(self.pixel_distances)
def prepare(self):
self.hue_inner = random.random()
self.wave1_offset = random.random()
self.wave2_offset = random.random()
self.luminance_offset = random.random()
def draw(self, dt):
#if self._mixer.is_onset():
# self.hue_inner = math.fmod(self.hue_inner + self.hue-step(), 1.0)
# self.luminance_offset += self.hue-step()
self.hue_inner += dt * self.speed()
self.wave1_offset += self.wave1_speed() * dt
self.wave2_offset += self.wave2_speed() * dt
self.luminance_offset += self.luminance_speed() * dt
luminance_table = []
luminance = 0.0
for input in range(self.luminance_steps()):
if input > self.blackout() * self.luminance_steps():
luminance -= 0.01
luminance = clip(0, luminance, 1.0)
elif input < self.whiteout() * self.luminance_steps():
luminance += 0.1
luminance = clip(0, luminance, 1.0)
else:
luminance -= 0.01
luminance = clip(0.5, luminance, 1.0)
luminance_table.append(luminance)
luminance_table = np.asarray(luminance_table)
wave1_period = self.wave1_period()
wave1_amplitude = self.wave1_amplitude()
wave2_period = self.wave2_period()
wave2_amplitude = self.wave2_amplitude()
luminance_scale = self.luminance_scale()
wave1 = np.abs(np.cos(self.wave1_offset + self.pixel_angles * wave1_period) * wave1_amplitude)
wave2 = np.abs(np.cos(self.wave2_offset + self.pixel_angles * wave2_period) * wave2_amplitude)
hues = self.pixel_distances + wave1 + wave2
luminance_indices = np.mod(np.abs(np.int_((self.luminance_offset + hues * luminance_scale) * self.luminance_steps())), self.luminance_steps())
luminances = luminance_table[luminance_indices]
hues = np.fmod(self.hue_inner + hues * self.hue_width(), 1.0)
self.setAllHLS(hues, luminances, 1.0)
| craftyjon/firelight | presets/radial_gradient.py | radial_gradient.py | py | 3,881 | python | en | code | 2 | github-code | 13 |
71329030737 | from grpc_cust.clientapival_client import get_clientinfo, get_clientapikey, get_verified_apikey
def test_clientapival_client():
info = get_clientinfo("mfg")
assert info is not None
apikey = get_clientapikey("IamWrongClient","IamWrongClient")
assert apikey.expiry == "1900-01-01"
apikey = get_clientapikey("mfg","mfg")
token = apikey.apikey
verifiedresult = get_verified_apikey(token)
assert verifiedresult.assertion.split(":")[2] == "/mfg"
apikey = get_clientapikey("eng","eng")
token = apikey.apikey
verifiedresult = get_verified_apikey(token)
assert verifiedresult.assertion.split(":")[2] == "/eng"
| eslywadan/dataservice | tests/clientapival_client_test.py | clientapival_client_test.py | py | 666 | python | en | code | 0 | github-code | 13 |
16984434177 | from __future__ import print_function
from __future__ import absolute_import
import os
import logging
import pickle
import random
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import *
from tensorflow.keras import regularizers
from concactLayer import *
from mediumlayer import *
from attention_layer import *
from MultiHeadAttention import *
from LayerNormalization import *
from Position_Embedding import *
from PositionWiseFeedForward import *
from selfattention import *
tf.compat.v1.disable_eager_execution()
seed = 42
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
logger = logging.getLogger(__name__)
'''
Variant: the model takes only the text contexts (S1/S2) as input.
'''
class CodeMF:
def __init__(self, config):
self.config = config
self.text_length = 100
self.queries_length = 25
self.code_length = 350
self.class_model = None
self.train_model = None
self.text_S1 = Input(shape=(self.text_length,),
dtype='int32', name='i_S1name')
self.text_S2 = Input(shape=(self.text_length,),
dtype='int32', name='i_S2name')
self.code = Input(shape=(self.code_length,),
dtype='int32', name='i_codename')
self.queries = Input(shape=(self.queries_length,),
dtype='int32', name='i_queryname')
self.labels = Input(shape=(1,), dtype='int32', name='i_queryname')
self.nb_classes = 2
self.dropout = None
self.model_params = config.get('model_params', dict())
self.data_params = config.get('data_params', dict())
self.text_embbeding = pickle.load(
open(self.data_params['text_pretrain_emb_path'], "rb"), encoding='iso-8859-1')
self.code_embbeding = pickle.load(
open(self.data_params['code_pretrain_emb_path'], "rb"), encoding='iso-8859-1')
# create a model path to store model info
model_dir = self.config['workdir'] + 'models/' + \
self.model_params['model_name'] + '/'
if not os.path.exists(model_dir):
os.makedirs(model_dir)
self.nb_classes = 2
self.dropout1 = None
self.dropout2 = None
self.dropout3 = None
self.dropout4 = None
self.dropout5 = None
self.Regularizer = None
self.random_seed = None
self.num = None
def params_adjust(self, dropout1=0.5, dropout2=0.5, dropout3=0.5, dropout4=0.5, dropout5=0.5, Regularizer=0.01, num=100, seed=42):
self.dropout1 = dropout1
self.dropout2 = dropout2
self.dropout3 = dropout3
self.dropout4 = dropout4
self.dropout5 = dropout5
self.Regularizer = Regularizer
self.random_seed = seed
self.num = num
def build(self):
'''
1. Build Code Representation Model
'''
logger.debug('Building Code Representation Model')
text_S1 = Input(shape=(self.text_length,),
dtype='int32', name='S1name')
text_S2 = Input(shape=(self.text_length,),
dtype='int32', name='S2name')
code = Input(shape=(self.code_length,), dtype='int32', name='codename')
queries = Input(shape=(self.queries_length,),
dtype='int32', name='queryname')
print("===============", text_S1.shape)
'''
2. Embedding
'''
embedding_layer = Embedding(self.text_embbeding.shape[0], self.text_embbeding.shape[1],
weights=[
self.text_embbeding], input_length=self.text_length,
trainable=False, mask_zero=True)
text_S1_embeding = embedding_layer(text_S1)
text_S2_embeding = embedding_layer(text_S2)
'''
3. Position Embedding
'''
position_embedding = Position_Embedding(10, 'concat')
text_S1_embeding_p = position_embedding(text_S1_embeding)
text_S2_embeding_p = position_embedding(text_S2_embeding)
'''
4. Dropout
'''
dropout = Dropout(self.dropout1, name='dropout_embed', seed=self.random_seed)
text_S1_embeding_d = dropout(text_S1_embeding_p)
text_S2_embeding_d = dropout(text_S2_embeding_p)
'''
5. Transformer
'''
attention_layer = MultiHeadAttention_(10)
t1 = attention_layer(
[text_S1_embeding_d, text_S1_embeding_d, text_S1_embeding_d])
t2 = attention_layer(
[text_S2_embeding_d, text_S2_embeding_d, text_S2_embeding_d])
add_out = Lambda(lambda x: x[0] + x[1])
t1 = add_out([t1, text_S1_embeding_d])
t2 = add_out([t2, text_S2_embeding_d])
t1_l = LayerNormalization()(t1)
t2_l = LayerNormalization()(t2)
ff = PositionWiseFeedForward(310, 2048)
ff_t1 = ff(t1_l)
ff_t2 = ff(t2_l)
dropout_ = Dropout(self.dropout2, name='dropout_ffn', seed=self.random_seed)
ff_t1 = dropout_(ff_t1)
ff_t2 = dropout_(ff_t2)
ff_t1 = add_out([ff_t1, t1_l])
ff_t2 = add_out([ff_t2, t2_l])
t1 = LayerNormalization()(ff_t1)
t2 = LayerNormalization()(ff_t2)
'''
        5.1 Fuse code and context semantics
'''
dropout = Dropout(self.dropout3, name='dropout_qc', seed=self.random_seed)
# t1 = dropout(t1)
# t2 = dropout(t2)
leaky_relu = Lambda(lambda x: tf.nn.leaky_relu(x))
text_S1_semantic = GlobalAveragePooling1D(name='globaltext_1')(t1)
text_S1_semantic = leaky_relu(text_S1_semantic) # -----------
text_S2_semantic = GlobalAveragePooling1D(name='globaltext_2')(t2)
text_S2_semantic = leaky_relu(text_S2_semantic) # -------------
'''
c_q = MediumLayer()([code_semantic,queries_semantic])
c_q = concatLayer()(c_q)
c_q = Dense(162,activation='tanh',name='qc')(c_q)
'''
        # fuse the semantics
sentence_token_level_outputs = MediumLayer()(
[text_S1_semantic, text_S2_semantic])
layer5 = Bidirectional(GRU(units=128, dropout=self.dropout4))
f1 = layer5(sentence_token_level_outputs)
dropout = Dropout(self.dropout5, name='dropout2', seed=self.random_seed)
f1 = dropout(f1)
# f1 = LayerNormalization()(f1)
# f1 = PositionWiseFeedForward(256, 2048)(f1)
'''
sentence_token_level_outputs = MediumLayer()(
[text_S1_semantic, text_S2_semantic, c_q])
layer5 = Bidirectional(GRU(units=128, return_sequences=True, dropout=self.dropout4))
f1 = layer5(sentence_token_level_outputs)
f1 = Lambda(lambda x:K.permute_dimensions(x,(1,0,2)))(f1)
f1 = Lambda(lambda x: tf.unstack(x, axis=0))(f1)
f1 = Lambda(lambda x:x[-1])(f1)
dropout = Dropout(self.dropout5, name='dropout2', seed=self.random_seed)
f1 = dropout(f1)
'''
'''
        7. Classification
'''
classf = Dense(2, activation='softmax', name="final_class",
kernel_regularizer=regularizers.l2(self.Regularizer))(f1)
class_model = Model(inputs=[text_S1, text_S2, code, queries], outputs=[
classf], name='class_model')
self.class_model = class_model
print("\nSummary of class model")
self.class_model.summary()
fname = self.config['workdir'] + 'models/' + \
self.model_params['model_name'] + '/_class_model.png'
P1, P2, Pc, Pq = None, None, None, None
myloss = self.dice_loss(P1, P2, Pc, Pq)
optimizer = Adam(learning_rate=0.001, clipnorm=0.001)
self.class_model.compile(loss=myloss, optimizer=optimizer)
| rouqinghuoliushui98/Code_modification | New_Code/ANN_Staqc_new/models_text.py | models_text.py | py | 7,200 | python | en | code | 0 | github-code | 13 |
7702731832 | """
GPU Metrics from GPUtil.
"""
from node.telemetry.metric import Metric
import GPUtil
class GPU(Metric):
"""
Wrapper for GPUtil GPU information.
"""
def metric_name(self) -> str:
return "gpu"
def measure(self) -> dict:
try:
data = {}
gpus = GPUtil.getGPUs()
for gpu in gpus:
uuid = gpu.uuid
data[uuid] = {}
data[uuid]['uuid'] = gpu.uuid
data[uuid]['load'] = gpu.load
data[uuid]['mem_percent'] = gpu.memoryUtil
data[uuid]['mem_total'] = int(gpu.memoryTotal)
data[uuid]['mem_used'] = int(gpu.memoryUsed)
data[uuid]['driver'] = gpu.driver
data[uuid]['product'] = gpu.name
data[uuid]['serial'] = gpu.serial
data[uuid]['display_mode'] = gpu.display_mode
return data
except Exception as e:
raise ValueError(f'Unable to collect GPU metrics: {e}')
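# Illustrative shape of the measure() result (values assumed, one GPU present; keys match the code above):
#   {"GPU-xxxxxxxx": {"uuid": "GPU-xxxxxxxx", "load": 0.42, "mem_percent": 0.31,
#                     "mem_total": 8192, "mem_used": 2560, "driver": "535.104",
#                     "product": "GeForce RTX 3070", "serial": "...", "display_mode": "Enabled"}}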
| blackadar/shepherd | node/telemetry/metrics/gpu.py | gpu.py | py | 1,026 | python | en | code | 2 | github-code | 13 |
20999043873 | import os
import logging
import click
import shutil
import hashlib
from collections import defaultdict
logging.basicConfig(
filename="history.log", format="%(asctime)s %(message)s", filemode="a"
)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def _checksum(folder_path, file_path):
absolute_path = os.path.join(folder_path, file_path)
return hashlib.md5(open(absolute_path, "rb").read()).hexdigest()
def scan(folder_path):
hashes = defaultdict(list)
files = os.listdir(folder_path)
for file in files:
hash = _checksum(folder_path, file)
hashes[hash].append(file)
return hashes
def fuse(hashes, folder_path, remove_files=False, save_log=True):
if not remove_files:
if not os.path.exists("./duplicates"):
os.makedirs("./duplicates")
for key, value in hashes.items():
if len(value) > 1:
print(f"{key} has more than one file: {value}")
hashes[key].pop(0)
for file in hashes[key]:
file_absolute_path = os.path.join(folder_path, file)
if remove_files: # removes file
os.remove(file_absolute_path)
if save_log:
logger.info(f"{file_absolute_path} deleted succesfully.")
else: # moves file to duplicate folder
shutil.move(file_absolute_path, "./duplicates")
if save_log:
logger.info(
f"'{file_absolute_path}' moved to './duplicates' directory succesfully."
)
        else:
            print("No duplicates found.")
@click.command()
@click.argument("folder_path", default="./")
@click.option(
"-R",
"--remove",
default="False",
type=bool,
help="remove files instead of move",
)
@click.option(
"-L", "--log", default="True", type=bool, help="export records of actions"
)
def main(folder_path, remove, log):
hashes = scan(folder_path)
fuse(hashes, folder_path, remove_files=remove, save_log=log)
if __name__ == "__main__":
main()
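# Example invocation (illustrative; assumes the file is saved as duplicate.py):
#   python duplicate.py ./photos --remove False --log True
# Duplicate files are detected by MD5 checksum and either deleted or moved into
# ./duplicates, with each action appended to history.log when logging is enabled.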
| cobanov/easy-duplicate | duplicate.py | duplicate.py | py | 2,293 | python | en | code | 3 | github-code | 13 |
7709997337 | import requests
import pymysql
import csv
## Kakao API
def whole_region(keyword, start_x,start_y,end_x,end_y):
#print(start_x,start_y,end_x,end_y)
page_num = 1
    # list that will hold the collected data
all_data_list = []
while (1):
url = 'https://dapi.kakao.com/v2/local/search/keyword.json'
params = {'query': keyword, 'page': page_num,
'rect': f'{start_x},{start_y},{end_x},{end_y}'}
headers = {"Authorization": "KakaoAK 67ce0f01ed0a5ab169be0758cad914cb"}
        ## example input -->> headers = {"Authorization": "KakaoAK f64acbasdfasdfasf70e4f52f737760657"}
resp = requests.get(url, params=params, headers=headers)
search_count = resp.json()['meta']['total_count']
        # print('total count', search_count)
if search_count > 45:
            print('splitting the rect into 4 quadrants')
            dividing_x = (start_x + end_x) / 2
            dividing_y = (start_y + end_y) / 2
            ## bottom-left of the four quadrants
            all_data_list.extend(whole_region(keyword, start_x, start_y, dividing_x, dividing_y))
            ## bottom-right of the four quadrants
            all_data_list.extend(whole_region(keyword, dividing_x, start_y, end_x, dividing_y))
            ## top-left of the four quadrants
            all_data_list.extend(whole_region(keyword, start_x, dividing_y, dividing_x, end_y))
            ## top-right of the four quadrants
            all_data_list.extend(whole_region(keyword, dividing_x, dividing_y, end_x, end_y))
return all_data_list
else:
if resp.json()['meta']['is_end']:
all_data_list.extend(resp.json()['documents'])
return all_data_list
            # otherwise, move on to the next page and keep collecting data
            else:
                print('next page')
page_num += 1
all_data_list.extend(resp.json()['documents'])
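# Note added for clarity: the code above assumes the Kakao keyword-search API returns at
# most 45 usable results per rect query, so whole_region() recursively splits any rectangle
# that exceeds that count into four quadrants and merges the sub-results, which guarantees
# full coverage of the requested area.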
def overlapped_data(keyword, start_x, start_y, next_x, next_y, num_x, num_y):
    # list that will hold the final data
overlapped_result = []
    # fetch data while sweeping the map as a grid of rectangles
for i in range(1, num_x + 1): ## 1,10
end_x = start_x + next_x
initial_start_y = start_y
for j in range(1, num_y + 1): ## 1,6
end_y = initial_start_y + next_y
each_result = whole_region(keyword, start_x, initial_start_y, end_x, end_y)
overlapped_result.extend(each_result)
initial_start_y = end_y
start_x = end_x
return overlapped_result
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='1234', db='company1')
c = conn.cursor()
c.execute(f"select store,longitude,latitude from company1.starbucks")
temp_db = c.fetchall()
conn.close()
result=[]
for i in range(len(temp_db)):
print(f'-----------------{temp_db[i][0]}--------------------')
start_x= temp_db[i][1]-0.004480
start_y= temp_db[i][2]-0.005445
    next_x=0.001 # longitude step size
    next_y=0.001 # latitude step size
    num_x=11 # number of longitude steps
    num_y=11 # number of latitude steps
    a_list=['학교']  # '학교' ('school') is the search keyword sent to the Kakao API, kept in Korean on purpose
b_list=[]
for j in a_list:
overlapped_result = overlapped_data(j, start_x, start_y, next_x, next_y, num_x, num_y)
b_list.append(overlapped_result)
for k in b_list[0]:
        if k['category_group_name'] == '학교':  # category '학교' == 'school'
print(temp_db[i][0],k['place_name'],k['road_address_name'])
temp=[temp_db[i][0],k['place_name'],k['road_address_name']]
result.append(temp)
print(result)
# write the results to a csv file
f = open('star_education.csv', 'w', newline='')
wr = csv.writer(f)
wr.writerows(result)
f.close()
| lsgyeong/companyproject1 | kakaocrawling.py | kakaocrawling.py | py | 3,698 | python | en | code | 0 | github-code | 13 |
35547771870 | """
This script takes a paired alignment file, assigns each end to a bin (a chunk of
a chromosome defined by the supplied bin size), and prints out the bin-bin counts for only
those contacts within some width of the diagonal (the distance between the bins).
Prints a unique format. The file starts with a series of lines that start with a '#'.
These are the total bin counts, used to do normalization afterwards, if desired. After
that, the format is compressed:
chromosome bin1 bin2 count
Programming notes to check for: handling of double counting on the diagonal.
Suggested use: use this script to generate 500 bp bin data, then use the downstream
normalization script to combine bins to generate larger bins if desired.
"""
from optparse import OptionParser
import sys
import re
import gzip
import numpy as np
def parse_options():
parser = OptionParser()
parser.add_option("-f", "--files", dest="filenames",
help="paired alignment files, comma separated", metavar="FILE")
parser.add_option("-b", "--bin_size",
dest="bin_size", default=1000000,
help="bin size")
parser.add_option("-w", "--width",
dest="width", default=1000,
help="width in bins from diagonal")
parser.add_option("-s", "--file_stem",
dest="file_stem", default='none',
help="output file stem. Adds diag_bin_counts bin size and chr")
(options, args) = parser.parse_args()
return options
def Add_read (chr, bin1, bin2, num_bins):
if (chr not in bin_bin_counts):
bin_bin_counts[chr] = np.zeros((num_bins, num_bins))
bin_bin_counts[chr][bin1][bin2] = bin_bin_counts[chr][bin1][bin2] + 1
def add_to_totals(chr, bin, num_bins):
if (chr in bin_totals):
bin_totals[chr][bin] = bin_totals[chr][bin] + 1
else:
bin_totals[chr] = np.zeros(num_bins)
def update_max_bin(chr, bin1, bin2):
if (chr in max_bin):
return(max(bin1, bin2, max_bin[chr]))
else:
return(max(bin1, bin2))
options = parse_options()
bin_size = int(options.bin_size)
filenames = options.filenames
files = filenames.split(',')
width = int(options.width)
num_bins = int(1e5) # dummy value for array creation...chromosome entries must be < 50 million bp
bin_bin_counts = {}
bin_totals = {}
max_bin = {}
line_count = 0
for f in files:
if (f[-2:] == 'gz'):
infile = gzip.open(f, 'rt')
else:
infile = open(f, 'r')
for line in infile:
line_count = line_count + 1
if (line_count % 10000000 == 0):
print('. ' + str(line_count / 10000000))
line = line.rstrip()
items = line.split()
(chr1, Lmost1, chr2, Lmost2) = items[2], int(items[3]), items[5], int(items[6])
if ((chr1 == chr2)):
bin1 = int(Lmost1 / bin_size)
bin2 = int(Lmost2 / bin_size)
add_to_totals(chr1, bin1, num_bins)
if (bin1 != bin2):
add_to_totals(chr1, bin2, num_bins)
max_bin[chr1] = update_max_bin(chr1, bin1, bin2)
if (abs(bin1 - bin2) <= width):
Add_read(chr1, bin1, bin2, num_bins) # Avoid double counting on diagonal. This will be triggered unless chromosome and bin are the same
if (bin1 != bin2):
Add_read(chr1, bin2, bin1, num_bins)
infile.close()
file_stem = ''
if (options.file_stem == 'none'):
file_stem = re.sub('.txt', '', files[0])
else:
file_stem = options.file_stem
print('done reading\n')
for chr in bin_totals.keys():
with open(file_stem + '_CompressedBinCounts_' + str(bin_size) + 'bp_' + str(chr) + '.txt','w') as outfile:
#print bin counts
for bin in range(0, max_bin[chr] + 1):
outfile.write('#' + chr + '\t' + str(bin) + '\t')
outfile.write(str(bin_totals[chr][bin]) + '\n')
#print bin_bin counts
for bin1 in range(0, max_bin[chr] + 1):
for bin2 in range(max(0, bin1 - width), min(max_bin[chr] + 1, bin1 + width + 1)):
outfile.write(chr + '\t' + str(bin1) + '\t' + str(bin2) + '\t' + str(bin_bin_counts[chr][bin1][bin2]) + '\n')
| michaelrstadler/hic | bin/archive/HiC_bincounts_generate_compressed_allchr.py | HiC_bincounts_generate_compressed_allchr.py | py | 3,839 | python | en | code | 0 | github-code | 13 |
17081473324 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.HeatMapData import HeatMapData
class AlipayCommerceTransportTaxiHeatmapQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceTransportTaxiHeatmapQueryResponse, self).__init__()
self._heatmap_data = None
@property
def heatmap_data(self):
return self._heatmap_data
@heatmap_data.setter
def heatmap_data(self, value):
if isinstance(value, HeatMapData):
self._heatmap_data = value
else:
self._heatmap_data = HeatMapData.from_alipay_dict(value)
def parse_response_content(self, response_content):
response = super(AlipayCommerceTransportTaxiHeatmapQueryResponse, self).parse_response_content(response_content)
if 'heatmap_data' in response:
self.heatmap_data = response['heatmap_data']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AlipayCommerceTransportTaxiHeatmapQueryResponse.py | AlipayCommerceTransportTaxiHeatmapQueryResponse.py | py | 972 | python | en | code | 241 | github-code | 13 |
def homework_2(lst): # remember to rename this file to your own student ID (e.g. 1104813.py)
count = 0
    for i in range(len(lst)): # check whether each number is odd
if (lst[i]+1) % 2 == 0:
lst[i] += 1
count += 1
    for i in range(len(lst)-1): # check whether the next number is larger than the previous one
while lst[i] >= lst[i+1]:
lst[i+1] += 2
count += 2
continue
return count
if __name__ == '__main__':
lst = [1,1,1]
print(homework_2(lst))
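    # Worked example (added): for lst = [1, 1, 1] each odd entry is first bumped to 2
    # (cost 3), then the list is made strictly increasing as [2, 4, 6] (cost 2 + 4),
    # so this prints 9.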
| daniel880423/Member_System | file/hw2/1080406/s1080406_4.py | s1080406_4.py | py | 517 | python | zh | code | 0 | github-code | 13 |
17938130632 | import requests
from requests.cookies import RequestsCookieJar
import json
import time
import os
import sys
import datetime
import copy
sys_args = int(sys.argv[1])
reverse_data = datetime.datetime.now() + datetime.timedelta(days=6)
reverse_data = reverse_data.strftime('%Y-%m-%d')
reverse_time = ["09:00", "09:30", "10:00", "10:30", "11:00", "11:30", "12:00", "12:30", "13:00", "13:30", "14:00",
"14:30", "15:00", "15:30", "16:00", "16:30", "17:00", "17:30", "18:00", "18:30", "19:00"]
time_begin = time.time()
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:80.0) Gecko/20100101 Firefox/80.0",
"Connection": "keep-alive"
}
url = "http://lims.gzzoc.com/account/appointment/BeforeBook"
url_confirm = "http://lims.gzzoc.com/account/appointment/book"
data = {
"instrumentId": "1096",
"beginTime": "2020-09-20 09:30",
"endTime": "2020-09-20 10:30"
}
def get_cookies():
cookiejar = RequestsCookieJar()
with open("./cookies.txt", "r") as f:
cookies = json.loads(f.read())
for cookie in cookies:
cookiejar.set(cookie['name'], cookie['value'])
return cookiejar
def add_reverse_data(args_index):
args_begin_time = reverse_data + " " + reverse_time[args_index]
args_end_time = reverse_data + " " + reverse_time[args_index + 4]
data['beginTime'] = args_begin_time
data['endTime'] = args_end_time
return data
if __name__ == '__main__':
cookies = get_cookies()
data = add_reverse_data(sys_args)
data_confirm = copy.copy(data)
data_confirm.update({"remarks": "wish a better result"})
while True:
time_present = time.time()
session = requests.session()
if (time_present - time_begin) > 400:
break
else:
pass
session_reu = session.post(url=url, cookies=cookies, headers=header, data=data_confirm, verify=False)
print("时间段" + data['beginTime'] + "--" + data['endTime'] + session_reu.text[38:49])
if "true" in session_reu.text:
session_confirm = session.post(url=url_confirm, cookies=cookies, headers=header, data=data_confirm, verify=False)
while not ("true" in session_confirm.text):
session_confirm = session.post(url=url_confirm, cookies=cookies, headers=header, data=data_confirm, verify=False)
print("时间段" + data['beginTime'] + "--" + data['endTime'] + session_confirm.text[38:49])
sys.exit(0)
else:
print("时间段" + data['beginTime'] + "--" + data['endTime'] + session_reu.text[38:49])
| Thinknoon/python_reservstion | main_improved.py | main_improved.py | py | 2,668 | python | en | code | 0 | github-code | 13 |
32080025620 | class Node(object):
def __init__(self, value):
self.value = value
self.left = None
self.right = None
class BinaryTree(object):
def __init__(self, root):
self.root = Node(root)
self.stack = []
def traverse(self, node):
curr = node
while True:
if curr:
self.stack.append(curr)
curr = curr.left
elif self.stack:
curr = self.stack.pop()
print(curr.value)
curr = curr.right
else:
break
"""
Sample test case
8
/ \
4 12
/ \ / \
1 6 9 15
/ \
5 10
Should print:
1
4
5
6
8
9
10
12
15
"""
# Set up tree:
tree = BinaryTree(8)
tree.root.left = Node(4)
tree.root.left.left = Node(1)
tree.root.left.right = Node(6)
tree.root.left.right.left = Node(5)
tree.root.right = Node(12)
tree.root.right.left = Node(9)
tree.root.right.left.right = Node(10)
tree.root.right.right = Node(15)
tree.traverse(tree.root)
#print(tree.print_tree("preorder"))
#print(tree.print_tree("inorder"))
#print(tree.print_tree("postorder"))
| samgh/Byte-by-Byte-Solutions | python/InorderTraversal.py | InorderTraversal.py | py | 1,141 | python | en | code | 154 | github-code | 13 |
74253715858 | """Script to train a GAN.
Examples:
python main.py --dataset folder --dataroot /path/to/datasets/celeba \
--crop_size 160 --image_size 80 --code_size 256 --norm weight \
--lr 0.00002 --r_iterations 1 --niter 300000 \
--save_path /path/to/checkpoints/exp01 \
#--load_path /path/to/checkpoints/exp01
Trains a network on CelebA with learning rate 0.00002 for 300k iterations
to generate 80x80 images. Saves checkpoints and other stuff to
/path/to/checkpoints/exp01. Uncomment the load_path part to continue a
previous experiment.
You MUST have run common/split_data.py on the dataset before training.
"""
from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import argparse
import math
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.autograd import Variable
import numpy as np
from scipy import misc
import time
import random
from collections import defaultdict
from common import plotting
from common import util
from common.model import *
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required = True,
help = 'cifar10 | lsun | imagenet | folder | lfw')
parser.add_argument('--lsun_class', default = 'bedroom',
help = 'class of lsun dataset to use')
parser.add_argument('--dataroot', required = True,
help = 'path to dataset')
parser.add_argument('--batch_size', type = int, default = 32,
help = 'input batch size')
parser.add_argument('--image_size', type = int, default = -1,
help = 'image size')
parser.add_argument('--width', type = int, default = -1,
help = 'image width')
parser.add_argument('--height', type = int, default = -1,
help = 'image height')
parser.add_argument('--crop_size', type = int, default = -1,
help = 'crop size before scaling')
parser.add_argument('--crop_width', type = int, default = -1,
help = 'crop width before scaling')
parser.add_argument('--crop_height', type = int, default = -1,
help = 'crop height before scaling')
parser.add_argument('--code_size', type = int, default = 128,
help = 'size of latent code')
parser.add_argument('--nfeature', type = int, default = 64,
help = 'number of features of first conv layer')
parser.add_argument('--nlayer', type = int, default = -1,
help = 'number of down/up conv layers')
parser.add_argument('--norm', default = 'none',
help = 'type of normalization: none | batch | weight | weight-affine')
parser.add_argument('--save_path', default = None,
help = 'path to save generated files')
parser.add_argument('--load_path', default = None,
help = 'load to continue existing experiment')
parser.add_argument('--lr', type = float, default = 0.0001,
help = 'learning rate')
parser.add_argument('--test_interval', type = int, default = 10000,
help = 'how often to test reconstruction')
parser.add_argument('--test_lr', type = float, default = 0.01,
help = 'learning rate for reconstruction test')
parser.add_argument('--test_steps', type = int, default = 50,
help = 'number of steps in running reconstruction test')
parser.add_argument('--vis_interval', type = int, default = 2000,
help = 'how often to save generated samples')
parser.add_argument('--vis_size', type = int, default = 10,
help = 'size of visualization grid')
parser.add_argument('--vis_row', type = int, default = -1,
help = 'height of visualization grid')
parser.add_argument('--vis_col', type = int, default = -1,
help = 'width of visualization grid')
parser.add_argument('--save_interval', type = int, default = 5000,
help = 'how often to save network')
parser.add_argument('--niter', type = int, default = 50000,
help = 'number of iterations to train')
parser.add_argument('--final_test', action = 'store_true', default = False,
help = 'do final test')
parser.add_argument('--ls', action = 'store_true', default = False,
help = 'use LSGAN')
parser.add_argument('--output_scale', action = 'store_true', default = False,
help = 'save x*2-1 instead of x when saving image')
parser.add_argument('--net', default = 'best',
help = 'network to load for final test: best | last | <niter>')
parser.add_argument('--lambda_r', type = float, default = 0.9,
help = 'strength of MSE on R')
parser.add_argument('--spatial_dropout_r', type = float, default = 0,
help = 'Spatial dropout applied to R')
parser.add_argument('--r_iterations', type = int, default = 3,
help = 'how many LIS modules to use in G')
parser.add_argument('--always_train_all', action='store_true', default=False,
help = 'whether to always train with all LIS modules')
parser.add_argument('--load_tolerant', action='store_true', default=False,
help = 'whether to load state dict for G in a tolerant way, i.e. will not complain upon mismatches')
parser.add_argument('--nb_cache_total', type = int, default = 0,
help = 'size of the dataset cache')
parser.add_argument('--nb_cache_lists', type = int, default = 1,
help = 'number of caches to use, a value of N means that a specific image can be cached in up to N different versions (i.e. crops)')
parser.add_argument('--cache_p_drop', type = float, default = 0.1,
help = 'chance to drop a data entry from the cache')
parser.add_argument('--augment', default='none',
help = 'name of augmentation set to use')
parser.add_argument('--g_upscaling', default='fractional',
help = 'upscaling method to use in G: fractional|nearest|bilinear')
parser.add_argument('--d_dropout', type = float, default = 0,
help = 'dropout probability to use in D before the last layer')
opt = parser.parse_args()
print(opt)
transform_list = []
if (opt.crop_height > 0) and (opt.crop_width > 0):
    transform_list.append(transforms.CenterCrop((opt.crop_height, opt.crop_width)))
elif opt.crop_size > 0:
transform_list.append(transforms.CenterCrop(opt.crop_size))
if (opt.height > 0) and (opt.width > 0):
    transform_list.append(transforms.Scale((opt.height, opt.width)))
elif opt.image_size > 0:
transform_list.append(transforms.Scale(opt.image_size))
transform_list.append(transforms.CenterCrop(opt.image_size))
opt.height = opt.image_size
opt.width = opt.image_size
else:
raise ValueError('must specify valid image size')
if opt.augment == "flowers102":
transform_list.append(transforms.RandomHorizontalFlip())
from imgaug import augmenters as iaa
seq = iaa.Sequential([
iaa.Sometimes(0.5, iaa.AdditiveGaussianNoise(scale=(0, 0.035*255), per_channel=False)),
iaa.Sometimes(0.5, iaa.Multiply((0.9, 1.1), per_channel=False)),
iaa.Sometimes(0.5, iaa.ContrastNormalization((0.9, 1.1), per_channel=False)),
iaa.Sometimes(0.5, iaa.Affine(scale={"x": (0.9, 1.1), "y": (0.9, 1.1)}, rotate=(-15, 15), order=3, mode="symmetric"))
], random_order=True)
transform_list.append(util.ImgaugPytorchWrapper(seq))
elif opt.augment == "cifar10":
transform_list.append(transforms.RandomHorizontalFlip())
from imgaug import augmenters as iaa
seq = iaa.Sequential([
iaa.Sometimes(0.5, iaa.AdditiveGaussianNoise(scale=(0, 0.035*255), per_channel=False)),
iaa.Sometimes(0.5, iaa.Multiply((0.9, 1.1), per_channel=False)),
iaa.Sometimes(0.5, iaa.ContrastNormalization((0.9, 1.1), per_channel=False))
], random_order=True)
transform_list.append(util.ImgaugPytorchWrapper(seq))
elif opt.augment == "10kcats":
transform_list.append(transforms.RandomHorizontalFlip())
from imgaug import augmenters as iaa
seq = iaa.Sequential([
#iaa.Sometimes(0.5, iaa.AdditiveGaussianNoise(scale=(0, 0.035*255), per_channel=False)),
#iaa.Sometimes(0.3, iaa.AdditiveGaussianNoise(scale=(0, 0.005*255), per_channel=False)),
iaa.Sometimes(0.5, iaa.Multiply((0.9, 1.1), per_channel=False)),
iaa.Sometimes(0.5, iaa.ContrastNormalization((0.9, 1.1), per_channel=False)),
#iaa.Sometimes(0.5, iaa.Affine(scale={"x": (0.9, 1.1), "y": (0.9, 1.1)}, rotate=(-20, 20), order=3, mode="symmetric"))
iaa.Sometimes(0.5, iaa.Affine(scale={"x": (0.9, 1.1), "y": (0.9, 1.1)}, rotate=(-15, 15), order=3, mode="symmetric"))
], random_order=True)
transform_list.append(util.ImgaugPytorchWrapper(seq))
elif opt.augment == "lsun_churches":
transform_list.append(transforms.RandomHorizontalFlip())
from imgaug import augmenters as iaa
seq = iaa.Sequential([
iaa.Sometimes(0.5, iaa.AdditiveGaussianNoise(scale=(0, 0.035*255), per_channel=False)),
iaa.Sometimes(0.5, iaa.Multiply((0.9, 1.1), per_channel=False)),
iaa.Sometimes(0.5, iaa.ContrastNormalization((0.9, 1.1), per_channel=False)),
iaa.Sometimes(0.5, iaa.Affine(translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)}, scale={"x": (0.9, 1.1), "y": (0.9, 1.1)}, rotate=(-1, 1), order=1, mode="constant"))
], random_order=True)
transform_list.append(util.ImgaugPytorchWrapper(seq))
elif opt.augment == "none":
transform_list.append(transforms.RandomHorizontalFlip())
else:
raise Exception("--augment must be 'flowers102' or 'cifar10' or '10kcats' or 'none'")
transform_list.append(transforms.ToTensor())
data_index = torch.load(os.path.join(opt.dataroot, 'data_index.pt'))
train_index = data_index['train']
if opt.final_test:
test_index = data_index['final_test']
else:
test_index = data_index['running_test']
if (opt.vis_row <= 0) or (opt.vis_col <= 0):
opt.vis_row = opt.vis_size
opt.vis_col = opt.vis_size
if opt.nlayer < 0:
opt.nlayer = 0
s = max(opt.width, opt.height)
while s >= 8:
s = (s + 1) // 2
opt.nlayer = opt.nlayer + 1
if opt.dataset == 'cifar10':
dataset1 = datasets.CIFAR10(root = opt.dataroot, download = True,
transform = transforms.Compose(transform_list))
dataset2 = datasets.CIFAR10(root = opt.dataroot, train = False,
transform = transforms.Compose(transform_list))
def get_data(k):
if k < len(dataset1):
return dataset1[k][0]
else:
return dataset2[k - len(dataset1)][0]
else:
if opt.dataset in ['imagenet', 'folder', 'lfw']:
dataset = datasets.ImageFolder(root = opt.dataroot,
transform = transforms.Compose(transform_list))
elif opt.dataset == 'lsun':
dataset = datasets.LSUN(db_path = opt.dataroot, classes = [opt.lsun_class + '_train'],
transform = transforms.Compose(transform_list))
if opt.nb_cache_total <= 0:
def get_data(k):
return dataset[k][0]
else:
class CachedDataset(object):
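            # Comment added for clarity: a bounded, probabilistic cache over dataset items.
            # Each lookup picks one of nb_cache_lists sub-caches at random, and a cached
            # entry is dropped again with probability p_drop, so cached crops/augmentations
            # get refreshed over time instead of being pinned forever.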
def __init__(self, nb_cache_total, nb_cache_lists, p_drop):
self.cache = [defaultdict(list) for _ in range(nb_cache_lists)]
self.nb_cache_total = nb_cache_total
self.nb_cache_lists = nb_cache_lists
self.nb_cached_total = 0
self.p_drop = p_drop
def __call__(self, k):
l = random.randint(0, self.nb_cache_lists-1)
cache_list = self.cache[l]
if k in cache_list:
#print("cache hit")
if random.random() < self.p_drop:
#print("drop entry")
del cache_list[k]
self.nb_cached_total -= 1
else:
return cache_list[k]
el = dataset[k][0]
if self.nb_cached_total < self.nb_cache_total:
cache_list[k] = el
self.nb_cached_total += 1
return el
get_data = CachedDataset(opt.nb_cache_total, opt.nb_cache_lists, opt.cache_p_drop)
gen = GeneratorLearnedInputSpace(opt.width, opt.height, opt.nfeature, opt.nlayer, opt.code_size, opt.norm, n_lis_layers=opt.r_iterations, upscaling=opt.g_upscaling)
print(gen)
gen.cuda()
testfunc = nn.MSELoss()
if not opt.final_test:
dis = build_discriminator(opt.width, opt.height, opt.nfeature, opt.nlayer, opt.norm, opt.d_dropout)
print(dis)
dis.cuda()
if opt.ls:
lossfunc = nn.MSELoss()
else:
lossfunc = nn.BCELoss()
lossfunc_r = nn.MSELoss()
gen_opt = optim.RMSprop(gen.parameters(), lr = opt.lr, eps = 1e-6, alpha = 0.9)
dis_opt = optim.RMSprop(dis.parameters(), lr = opt.lr, eps = 1e-6, alpha = 0.9)
history = plotting.History()
history.add_group("loss-r-mix", ["train-r%d" % (i,) for i in range(opt.r_iterations)], increasing=False)
history.add_group("loss-g-mix", ["train-g%d" % (i,) for i in range(1+opt.r_iterations)], increasing=False)
history.add_group("loss-d-mix", ["train-d-real"] + ["train-d-fake%d" % (i,) for i in range(1+opt.r_iterations)], increasing=False)
state = {}
def load_state_dict_tolerant(model, pretrained_dict):
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
def load_state(path, prefix, gen_only = False):
if opt.load_tolerant:
load_state_dict_tolerant(gen, torch.load(os.path.join(opt.load_path, 'net_archive', '{0}_gen.pt'.format(prefix))))
else:
gen.load_state_dict(torch.load(os.path.join(opt.load_path, 'net_archive', '{0}_gen.pt'.format(prefix))))
if not gen_only:
if not opt.load_tolerant:
gen_opt.load_state_dict(torch.load(os.path.join(opt.load_path, 'net_archive', '{0}_gen_opt.pt'.format(prefix))))
dis.load_state_dict(torch.load(os.path.join(opt.load_path, 'net_archive', '{0}_dis.pt'.format(prefix))))
dis_opt.load_state_dict(torch.load(os.path.join(opt.load_path, 'net_archive', '{0}_dis_opt.pt'.format(prefix))))
state.update(torch.load(os.path.join(opt.load_path, 'net_archive', '{0}_state.pt'.format(prefix))))
state["history"] = plotting.History.from_string(state["history"])
def save_state(path, prefix):
torch.save(gen.state_dict(), os.path.join(opt.save_path, 'net_archive', '{0}_gen.pt'.format(prefix)))
torch.save(gen_opt.state_dict(), os.path.join(opt.save_path, 'net_archive', '{0}_gen_opt.pt'.format(prefix)))
torch.save(dis.state_dict(), os.path.join(opt.save_path, 'net_archive', '{0}_dis.pt'.format(prefix)))
torch.save(dis_opt.state_dict(), os.path.join(opt.save_path, 'net_archive', '{0}_dis_opt.pt'.format(prefix)))
state.update({
'index_shuffle' : index_shuffle,
'current_iter' : current_iter,
'best_iter' : best_iter,
'min_loss' : min_loss,
'current_sample' : current_sample,
'history': history.to_string()
})
torch.save(state, os.path.join(opt.save_path, 'net_archive', '{0}_state.pt'.format(prefix)))
loss_plotter = plotting.LossPlotter(
history.get_group_names(),
history.get_groups_increasing(),
save_to_fp=os.path.join(opt.save_path, "loss.jpg")
)
loss_plotter.start_batch_idx = 100
def visualize_real(images, filename):
images_np = [(img.cpu().numpy()*255).astype(np.uint8).transpose((1, 2, 0)) for img in images]
grid = util.draw_grid(list(images_np))
misc.imsave(filename, grid)
def visualize(code, filename, filename_r, filename_all):
gen.eval()
generated_by_riter = [[] for _ in range(1+opt.r_iterations)]
for i in xrange((code.size(0) - 1) // opt.batch_size + 1):
batch_size = min(opt.batch_size, code.size(0) - i * opt.batch_size)
batch_code = Variable(code[i * opt.batch_size : i * opt.batch_size + batch_size])
for r_iter in xrange(1+opt.r_iterations):
imgs, _ = gen(batch_code, n_execute_lis_layers=r_iter)
if opt.output_scale:
imgs = imgs * 2 - 1
imgs_np = (imgs.data.cpu().numpy()*255).astype(np.uint8).transpose((0, 2, 3, 1))
generated_by_riter[r_iter].extend(imgs_np)
generated_all = []
for i in xrange(len(generated_by_riter[0])):
block = [imgs[i] for imgs in generated_by_riter]
generated_all.append(np.hstack(block))
misc.imsave(filename, util.draw_grid(generated_by_riter[0], cols=opt.vis_col))
for r_iter in xrange(1, 1+opt.r_iterations):
misc.imsave(filename_r.format(r_iter-1), util.draw_grid(generated_by_riter[r_iter], cols=opt.vis_col))
misc.imsave(filename_all, util.draw_grid(generated_all, cols=opt.vis_col))
gen.train()
def test():
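    # Comment added for clarity: this is a reconstruction test. For each test image a latent
    # code is optimized with RMSprop for opt.test_steps steps to minimize the MSE between
    # gen(code) and the target; the mean final MSE over the test split is returned, and the
    # resulting codes are also rendered via visualize() when not in final_test mode.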
test_loss = 0
for param in gen.parameters():
param.requires_grad = False
gen.eval()
best_code = torch.Tensor(test_index.size(0), opt.code_size).cuda()
total_batch = (test_index.size(0) - 1) // opt.batch_size + 1
for i in range(total_batch):
if opt.final_test:
print('Testing batch {0} of {1} ...'.format(i + 1, total_batch))
batch_size = min(opt.batch_size, test_index.size(0) - i * opt.batch_size)
batch_code = Variable(torch.zeros(batch_size, opt.code_size).cuda())
batch_code.requires_grad = True
batch_target = torch.Tensor(batch_size, 3, opt.height, opt.width)
for j in range(batch_size):
batch_target[j].copy_(get_data(test_index[i * opt.batch_size + j]))
batch_target = Variable(batch_target.cuda())
test_opt = optim.RMSprop([batch_code], lr = opt.test_lr, eps = 1e-6, alpha = 0.9)
for j in range(opt.test_steps):
generated, _ = gen(batch_code)
loss = testfunc(generated, batch_target)
loss.backward()
test_opt.step()
batch_code.grad.data.zero_()
best_code[i * opt.batch_size : i * opt.batch_size + batch_size].copy_(batch_code.data)
generated, _ = gen(batch_code)
loss = testfunc(generated, batch_target)
test_loss = test_loss + loss.data[0] * batch_size
if opt.final_test:
print('batch loss = {0}'.format(loss.data[0]))
sample_rec_pair = torch.Tensor(2, 3, opt.height, opt.width)
for j in range(batch_size):
sample_rec_pair[0].copy_(get_data(test_index[i * opt.batch_size + j]))
sample_rec_pair[1].copy_(generated.data[j])
if opt.output_scale:
torchvision.utils.save_image(sample_rec_pair * 2 - 1, os.path.join(opt.load_path, '{0}_test'.format(opt.net), '{0}.png'.format(i * opt.batch_size + j)), 2)
else:
torchvision.utils.save_image(sample_rec_pair, os.path.join(opt.load_path, '{0}_test'.format(opt.net), '{0}.png'.format(i * opt.batch_size + j)), 2)
for param in gen.parameters():
param.requires_grad = True
gen.train()
if not opt.final_test:
visualize(
best_code[0 : min(test_index.size(0), opt.vis_row * opt.vis_col)],
filename=os.path.join(opt.save_path, 'running_test', 'test_{0}.jpg'.format(current_iter)),
filename_r=os.path.join(opt.save_path, 'running_test', 'r{0}_test_%d.jpg' % (current_iter,)),
filename_all=os.path.join(opt.save_path, 'running_test', 'all_test_{0}.jpg'.format(current_iter))
)
test_loss = test_loss / test_index.size(0)
print('loss = {0}'.format(test_loss))
return test_loss
def makedirs():
if not os.path.exists(opt.save_path):
os.makedirs(opt.save_path)
for sub_folder in ('samples', 'samples_all', 'running_test', 'net_archive', 'log'):
if not os.path.exists(os.path.join(opt.save_path, sub_folder)):
os.mkdir(os.path.join(opt.save_path, sub_folder))
for r_iter in range(opt.r_iterations):
fp = os.path.join(opt.save_path, "samples_r%d" % (r_iter,))
if not os.path.exists(fp):
os.mkdir(fp)
if opt.final_test:
load_state(opt.load_path, opt.net, True)
if not os.path.exists(os.path.join(opt.load_path, '{0}_test'.format(opt.net))):
os.mkdir(os.path.join(opt.load_path, '{0}_test'.format(opt.net)))
final_loss = test()
torch.save(final_loss, os.path.join(opt.load_path, '{0}_test'.format(opt.net), 'loss.pt'))
else:
if opt.load_path is not None:
if opt.save_path is None:
opt.save_path = opt.load_path
if opt.load_path != opt.save_path:
makedirs()
vis_code = torch.load(os.path.join(opt.load_path, 'samples', 'vis_code.pt')).cuda()
load_state(opt.load_path, 'last')
index_shuffle = state['index_shuffle']
current_iter = state['current_iter']
best_iter = state['best_iter']
min_loss = state['min_loss']
current_sample = state['current_sample']
history = state['history']
else:
if opt.save_path is None:
raise ValueError('must specify save path if not continue training')
makedirs()
vis_code = torch.randn(opt.vis_row * opt.vis_col, opt.code_size).cuda()
torch.save(vis_code, os.path.join(opt.save_path, 'samples', 'vis_code.pt'))
index_shuffle = torch.randperm(train_index.size(0))
current_iter = 0
best_iter = 0
min_loss = 1e100
current_sample = 0
vis_target = torch.Tensor(min(test_index.size(0), opt.vis_row * opt.vis_col), 3, opt.height, opt.width)
for i in range(vis_target.size(0)):
vis_target[i].copy_(get_data(test_index[i]))
if opt.output_scale:
torchvision.utils.save_image(vis_target * 2 - 1, os.path.join(opt.save_path, 'running_test', 'target.jpg'), opt.vis_row)
else:
torchvision.utils.save_image(vis_target, os.path.join(opt.save_path, 'running_test', 'target.jpg'), opt.vis_row)
ones = Variable(torch.ones(opt.batch_size, 1).cuda())
zeros = Variable(torch.zeros(opt.batch_size, 1).cuda())
zeros_half = Variable(torch.zeros(opt.batch_size//2, 1).cuda())
loss_record = torch.zeros(opt.test_interval, 3)
visualize(
vis_code,
filename=os.path.join(opt.save_path, 'samples', 'sample_{0}.jpg'.format(current_iter)),
filename_r=os.path.join(opt.save_path, 'samples_r{0}', 'sample_{0}_r.jpg'.format(current_iter)),
filename_all=os.path.join(opt.save_path, 'samples_all', 'sample_{0}_all.jpg'.format(current_iter))
)
visualize_real(
[get_data(train_index[index_shuffle[i]]) for i in xrange(64)],
filename=os.path.join(opt.save_path, 'real_images.jpg')
)
# train for --niter batches
while current_iter < opt.niter:
time_start = time.time()
current_iter = current_iter + 1
current_loss_record = loss_record[(current_iter - 1) % opt.test_interval]
loss_values_g = [None for _ in range(opt.r_iterations+1)]
loss_values_d_real = []
loss_values_d_fake = [None for _ in range(opt.r_iterations+1)]
loss_values_r = [None for _ in range(opt.r_iterations)]
for param in dis.parameters():
param.requires_grad = True
dis.zero_grad()
# train D on real data
true_sample = torch.Tensor(opt.batch_size, 3, opt.height, opt.width)
time_sample_sum = 0
for i in range(opt.batch_size):
time_sample_start = time.time()
true_sample[i].copy_(get_data(train_index[index_shuffle[current_sample]]))
time_sample_end = time.time()
time_sample_sum += (time_sample_end - time_sample_start)
current_sample = current_sample + 1
if current_sample == train_index.size(0):
current_sample = 0
index_shuffle = torch.randperm(train_index.size(0))
true_sample = Variable(true_sample.cuda())
loss_d_real = lossfunc(dis(true_sample), ones)
loss_d_real.backward()
loss_values_d_real.append(loss_d_real.data[0])
del true_sample
# train D on fake data (G)
rand_code = Variable(torch.randn(opt.batch_size, opt.code_size).cuda(), volatile=True)
        generated, lis_layers = gen(rand_code, n_execute_lis_layers=None if not opt.always_train_all else opt.r_iterations)
generated = Variable(generated.data)
loss_d_fake = lossfunc(dis(generated), zeros)
loss_d_fake.backward()
loss_values_d_fake[len(lis_layers)] = loss_d_fake.data[0]
dis_opt.step()
# train G
for param in dis.parameters():
param.requires_grad = False
gen.zero_grad()
do_train_r = (opt.lambda_r > 0)
rand_code = Variable(torch.randn(opt.batch_size, opt.code_size).cuda())
        generated, lis_layers = gen(rand_code, n_execute_lis_layers=None if not opt.always_train_all else opt.r_iterations)
loss_g = lossfunc(dis(generated), ones)
loss_g.backward(retain_graph=do_train_r if len(lis_layers) > 0 else False)
loss_values_g[len(lis_layers)] = loss_g.data[0]
if do_train_r:
for i, lis_layer_result in enumerate(lis_layers):
loss_r = lossfunc_r(lis_layer_result, rand_code)
loss_r = loss_r * (opt.lambda_r ** (i+1))
loss_r.backward(retain_graph=True if (i+1) < len(lis_layers) else False)
loss_values_r[i] = loss_r.data[0]
gen_opt.step()
# postprocess batch
# save losses, plot/visualize, print message, save network
current_loss_record[0] = loss_values_d_real[0] if loss_values_d_real[0] is not None else 0
current_loss_record[1] = loss_values_d_fake[0] if loss_values_d_fake[0] is not None else 0
current_loss_record[2] = loss_values_g[0] if loss_values_g[0] is not None else 0
msg = ["%d |" % (current_iter,)]
lvdr = np.average([v for v in loss_values_d_real if v is not None])
history.add_value("loss-d-mix", "train-d-real", current_iter, lvdr)
msg.append("d-real: %.4f" % (lvdr,))
for i, loss_value_d_fake in enumerate(loss_values_d_fake):
if loss_value_d_fake is not None:
history.add_value("loss-d-mix", "train-d-fake%d" % (i,), current_iter, loss_value_d_fake)
msg.append("d-fake%d: %.4f" % (i, loss_value_d_fake if loss_value_d_fake else -1))
for i, loss_value_g in enumerate(loss_values_g):
if loss_value_g is not None:
history.add_value("loss-g-mix", "train-g%d" % (i,), current_iter, loss_value_g)
msg.append("g%d: %.4f" % (i, loss_value_g if loss_value_g is not None else -1))
for i, loss_value_r in enumerate(loss_values_r):
if loss_value_r is not None:
history.add_value("loss-r-mix", "train-r%d" % (i,), current_iter, np.clip(loss_value_r, 0, 1))
msg.append("r%d: %.4f" % (i, loss_value_r if loss_value_r is not None else -1))
time_end = time.time()
msg.append("t:%.2fs (t_real: %.3fs)" % (time_end - time_start, time_sample_sum))
print(" ".join(msg))
if current_iter % opt.vis_interval == 0:
visualize(
vis_code,
filename=os.path.join(opt.save_path, 'samples', 'sample_{0}.jpg'.format(current_iter)),
filename_r=os.path.join(opt.save_path, 'samples_r{0}', 'sample_{0}_r.jpg'.format(current_iter)),
filename_all=os.path.join(opt.save_path, 'samples_all', 'sample_{0}_all.jpg'.format(current_iter))
)
if current_iter % 2500 == 0:
loss_plotter.plot(history)
if current_iter % opt.test_interval == 0:
print('Testing ...')
current_loss = test()
log = {
'training_loss' : loss_record,
'test_loss' : current_loss
}
torch.save(log, os.path.join(opt.save_path, 'log', 'loss_{0}.pt'.format(current_iter)))
if current_loss < min_loss:
print('new best network!')
min_loss = current_loss
best_iter = current_iter
save_state(opt.save_path, 'best')
save_state(opt.save_path, 'last')
if current_iter % opt.save_interval == 0:
save_state(opt.save_path, current_iter)
| aleju/gan-error-avoidance | g_lis/main.py | main.py | py | 26,045 | python | en | code | 23 | github-code | 13 |
6921550367 | from f5.sdk_exception import F5SDKError
from f5_heat.resources import f5_cm_cluster
from heat.common.exception import ResourceFailure
from heat.common import template_format
from heat.engine.hot.template import HOTemplate20150430
from heat.engine import rsrc_defn
from heat.engine import template
import mock
import pytest
cluster_template_defn = '''
heat_template_version: 2015-04-30
description: Testing clustering template
resources:
bigip_rsrc1:
type: F5::BigIP::Device
properties:
ip: 10.0.0.1
username: admin
password: admin
bigip_rsrc2:
type: F5::BigIP::Device
properties:
ip: 10.0.0.2
username: admin
password: admin
bigip_rsrc3:
type: F5::BigIP::Device
properties:
ip: 10.0.0.3
username: admin
password: admin
cluster:
type: F5::Cm::Cluster
properties:
device_group_name: test_cluster
devices: [bigip_rsrc1, bigip_rsrc2, bigip_rsrc3]
device_group_partition: Common
device_group_type: sync-failover
'''
versions = ('2015-04-30', '2015-04-30')
@mock.patch.object(template, 'get_version', return_value=versions)
@mock.patch.object(
template,
'get_template_class',
return_value=HOTemplate20150430
)
def mock_template(
templ_vers,
templ_class,
test_templ=cluster_template_defn
):
'''Mock a Heat template for the Kilo version.'''
templ_dict = template_format.parse(test_templ)
return templ_dict
def create_resource_definition(templ_dict):
'''Create a resource definition.'''
rsrc_def = rsrc_defn.ResourceDefinition(
'test_stack',
templ_dict['resources']['cluster']['type'],
properties=templ_dict['resources']['cluster']['properties']
)
return rsrc_def
@pytest.fixture
def F5CmCluster():
'''Instantiate the F5CmCluster resource.'''
template_dict = mock_template()
rsrc_def = create_resource_definition(template_dict)
f5_cm_cluster.ClusterManager.__init__ = mock.MagicMock(return_value=None)
f5_cm_cluster.ClusterManager.create = mock.MagicMock()
f5_cm_cluster.ClusterManager.teardown = mock.MagicMock()
mock_bigip = mock.MagicMock(name='fake-bigip')
mock_stack = mock.MagicMock(name='fake-stack')
cluster = f5_cm_cluster.F5CmCluster(
"cluster", rsrc_def, mock_stack
)
cluster.stack.resource_by_refid().get_bigip.return_value = mock_bigip
return cluster, mock_bigip
@pytest.fixture
def ClusterMgrF5SDKError():
template_dict = mock_template()
rsrc_def = create_resource_definition(template_dict)
f5_cm_cluster.ClusterManager.__init__ = mock.MagicMock()
f5_cm_cluster.ClusterManager.__init__.side_effect = ResourceFailure(
F5SDKError('test'), None, action='Create'
)
return f5_cm_cluster.F5CmCluster(
"cluster", rsrc_def, mock.MagicMock()
)
# Tests
def test_handle_create(F5CmCluster):
cluster, mock_bigip = F5CmCluster
create_result = cluster.handle_create()
assert create_result is None
assert f5_cm_cluster.ClusterManager.create.call_args == \
mock.call(
devices=[mock_bigip, mock_bigip, mock_bigip],
device_group_name='test_cluster',
device_group_partition='Common',
device_group_type='sync-failover'
)
def test_handle_create_fdsdkerror(ClusterMgrF5SDKError):
with pytest.raises(ResourceFailure) as ex:
ClusterMgrF5SDKError.handle_create()
assert 'F5SDKError: test' in ex.value.message
def test_handle_delete(F5CmCluster):
cluster, mock_bigip = F5CmCluster
create_result = cluster.handle_delete()
assert create_result is True
assert f5_cm_cluster.ClusterManager.__init__.call_args == \
mock.call(
devices=[mock_bigip, mock_bigip, mock_bigip],
device_group_name='test_cluster',
device_group_partition='Common',
device_group_type='sync-failover'
)
assert f5_cm_cluster.ClusterManager.teardown.call_args == mock.call()
def test_handle_delete_f5sdkerror(ClusterMgrF5SDKError):
with pytest.raises(ResourceFailure) as ex:
ClusterMgrF5SDKError.handle_delete()
assert 'F5SDKError: test' in ex.value.message
def test_resource_mapping():
rsrc_map = f5_cm_cluster.resource_mapping()
assert rsrc_map == {'F5::Cm::Cluster': f5_cm_cluster.F5CmCluster}
| F5Networks/f5-openstack-heat-plugins | f5_heat/resources/test/test_f5_cm_cluster.py | test_f5_cm_cluster.py | py | 4,376 | python | en | code | 7 | github-code | 13 |
26575588722 | class Solution:
def longestPalindromeSubseq(self, s: str) -> int:
        # Use dynamic programming to compute the result
if len(s) == 0:
return 0
DP = [[0] * (len(s) + 1) for i in range(len(s) + 1)]
reverse_s = s[::-1]
#
for i in range(1, len(s) + 1):
for j in range(1, len(s) + 1):
                # if the characters in the string and the reversed string are the same
if s[i - 1] == reverse_s[j - 1]:
DP[i][j] = DP[i - 1][j - 1] + 1
else:
DP[i][j] = max(DP[i - 1][j], DP[i][j - 1])
return DP[-1][-1]
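
if __name__ == "__main__":
    # Illustrative sanity checks (added; not part of the original submission).
    # The longest palindromic subsequence of "bbbab" is "bbbb", so the answer is 4.
    assert Solution().longestPalindromeSubseq("bbbab") == 4
    assert Solution().longestPalindromeSubseq("cbbd") == 2  # "bb"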
| ujas09/Leetcode | 516.py | 516.py | py | 633 | python | en | code | 0 | github-code | 13 |
3801919602 | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append("..")
from utils import box_blur, CBAM, FastGuidedFilter
def upsample(x, h, w):
return F.interpolate(x, size=[h,w], mode='bicubic', align_corners=True)
class ResBlock(nn.Module):
def __init__(self,
in_channels,
out_channels):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)
self.conv2 = nn.Conv2d(out_channels,out_channels, 3, padding=1)
self.relu = nn.ReLU(True)
def forward(self, x):
x = x+self.conv2(self.relu(self.conv1(x)))
return x
class GPNN(nn.Module):
def __init__(self,
ms_channels,
pan_channels,
n_feat,
n_layer):
super(GPNN, self).__init__()
self.n_layer = n_layer
relu = nn.ReLU()
# feat_extractor ms
feat_extractor_ms = [nn.Conv2d(ms_channels, n_feat, 3, padding=1), relu]
for i in range(n_layer-1):
feat_extractor_ms.append(ResBlock(n_feat, n_feat))
self.feat_extractor_ms = nn.ModuleList(feat_extractor_ms)
# feat_extractor pan
feat_extractor_pan = [nn.Conv2d(pan_channels, n_feat, 3, padding=1), relu]
for i in range(n_layer-1):
feat_extractor_pan.append(ResBlock(n_feat, n_feat))
self.feat_extractor_pan = nn.ModuleList(feat_extractor_pan)
# Attention
self.attention = nn.ModuleList([CBAM(n_feat, 2) for i in range(n_layer)])
# guided fusion unit
self.guided_filter = FastGuidedFilter(1, 1e-4)
# reconstruction
self.recon = nn.Sequential(
nn.Conv2d(n_feat*n_layer, n_feat, 1),
relu,
nn.Conv2d(n_feat, ms_channels, 3, padding=1)
)
def get_high_freq(self, x):
return x-box_blur(x, kernel_size=[5,5])
def forward(self, ms, pan=None):
# ms - low-resolution multi-spectral image [N,C,h,w]
# pan - high-resolution panchromatic image [N,1,H,W]
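        # Comment added for clarity: each stage extracts MS and PAN features, uses the fast
        # guided filter (with the PAN branch as guide) to lift the MS features to PAN
        # resolution, applies CBAM attention, and the concatenated per-stage outputs are
        # fused by the reconstruction head and added to the upsampled MS image as a residual.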
if type(pan)==torch.Tensor:
pass
elif pan==None:
raise Exception('User does not provide pan image!')
# 0. up-sample ms
N,C,h,w = ms.shape
_,_,H,W = pan.shape
ms0 = upsample(ms, H, W)
guided_ms = []
# 1. backbone
# ms = self.get_high_freq(ms)
# pan = self.get_high_freq(pan)
for i in range(self.n_layer):
ms = self.feat_extractor_ms[i](ms)
pan = self.feat_extractor_pan[i](pan)
lr_pan = upsample(pan, h, w)
guided_ms.append(self.attention[i](self.guided_filter(lr_pan,ms,pan)))
# 2. reconstruction
guided_ms = torch.cat(guided_ms, dim=1)
ms = self.recon(guided_ms)
ms = ms0+ms
return ms
class Discriminator(nn.Module):
def __init__(self,
in_channel,
base_channel=32):
super(Discriminator, self).__init__()
self.model = nn.Sequential(
nn.Conv2d(in_channel, base_channel, 3, stride=2, padding=1),
nn.BatchNorm2d(base_channel),
nn.LeakyReLU(inplace=True),
nn.Conv2d(base_channel, 2*base_channel, 3, stride=2, padding=1),
nn.BatchNorm2d(2*base_channel),
nn.LeakyReLU(inplace=True),
nn.Conv2d(2*base_channel, 4*base_channel, 3, stride=2, padding=1),
nn.BatchNorm2d(4*base_channel),
nn.LeakyReLU(inplace=True),
nn.Conv2d(4*base_channel, 8*base_channel, 3, padding=1),
nn.BatchNorm2d(8*base_channel),
nn.LeakyReLU(inplace=True),
nn.Conv2d(8*base_channel, 1, 1),
nn.BatchNorm2d(1),
nn.Sigmoid()
)
self.model.apply(weights_init_normal)
def forward(self, output_img, input_img, input_pan):
_,_,H,W = input_pan.shape
input_img = upsample(input_img, H,W)
img = torch.cat((output_img, input_img, input_pan), dim=1)
validity = self.model(img)
return validity
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find("BatchNorm2d") != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
# test
from torchsummary import summary
summary(GPNN(10,1,32,5).cuda(), [(10,32,32),(1,64,64)])
| Zhaozixiang1228/Pansharpening-FGF-GAN | models/FGF_GAN.py | FGF_GAN.py | py | 4,684 | python | en | code | 5 | github-code | 13 |
21148807037 | from django.db import models
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
GenderChoices=(
('Male','Male'),
('Female','Female'),
)
YearChoices = (
('1','First'),
('2','Second'),
('3','Third'),
('4','Fourth'),
('MCA-1','MCA-First'),
('MCA-2','MCA-Second'),
('MBA-1','MBA-First'),
('MBA-2','MBA-Second'),
)
BranchChoices = (
('CSE','Computer Science and Engineering'),
('IT','Information Technology'),
('ECE','Electronics and Communication Engineering'),
('ME','Mechanical Engineering'),
('CE','Civil Engineering'),
('EN','Electricals Engineering'),
('EI','Electronics and Instrumentation'),
('MBA','MBA'),
('MCA','MCA'),
)
AccommodationChoices=(
('Hosteler','Hosteler'),
('Day Scholar','Day Scholar'),
)
class Profile(models.Model):
user = models.OneToOneField(User,on_delete=models.CASCADE)
fullname = models.CharField(max_length=100, default='')
gender = models.CharField(max_length=10, choices=GenderChoices, default='')
branch = models.CharField(max_length=250, choices=BranchChoices, default='CSE')
year = models.CharField(max_length=20, choices=YearChoices, default='1')
session = models.CharField(max_length=20, default='')
contact_details = models.CharField(max_length=15, default='')
accommodation = models.CharField(max_length=20, choices=AccommodationChoices, default='')
profile_photo = models.FileField(null=True)
def __str__(self):
return self.user.username
def get_absolute_url(self):
return reverse('accounts:profile', kwargs={'pk': self.pk})
| anshulsharma1011/ssaksham | accounts/models.py | models.py | py | 1,760 | python | en | code | 0 | github-code | 13 |
41029348880 | from googleapiclient.discovery import build # used for Google sheets info
from google.oauth2 import service_account # also used for Google sheets info
import random # used for generating random choice (duh)
import config # used to hold our sensitive info
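# Illustrative shape of config.py (assumed from how it is used below; not the author's actual file):
#   config_stuff = {'SAMPLE_SPREADSHEET_ID': '<your-google-sheet-id>'}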
import time # used for sleep (lol)
SERVICE_ACCOUNT_FILE = 'keys.json'
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
creds = None # writes this variable to no value before overwriting it with the info we need, basically cleaning and prepping it
# noinspection PyRedeclaration
creds = service_account.Credentials.from_service_account_file(
SERVICE_ACCOUNT_FILE, scopes=SCOPES) # writes the creds value with the value from the keys json file above
service = build('sheets', 'v4', credentials=creds)
sheet = service.spreadsheets()
watched_or_unwatched_prompt = str(input(
"Would you like to watch a new random movie today or a random movie you have already watched? \nPlease type: W for watched or U for unwatched:\n(at any time type X to exit the program)\n")).upper()
result_headers_w = sheet.values().get(spreadsheetId=config.config_stuff['SAMPLE_SPREADSHEET_ID'],
range="Movies!I2:N2").execute()
column_headers_w = result_headers_w.get('values', []) # get values from spreadsheet
result_headers_u = sheet.values().get(spreadsheetId=config.config_stuff['SAMPLE_SPREADSHEET_ID'],
range="Movies!A2:F2").execute()
column_headers_u = result_headers_u.get('values', []) # get values from spreadsheet
running = True
while running:
if watched_or_unwatched_prompt == "W":
# grab the sheet info from the API
result_w = sheet.values().get(spreadsheetId=config.config_stuff['SAMPLE_SPREADSHEET_ID'],
range="Movies!I:L").execute()
# build that into a list of lists
watched_movies = result_w.get('values', [])
# make the program seem more retro and realistic
print("Generating random watched movie, please wait...\n")
# sleeps make people think the program works better believe it or not. lol
time.sleep(3)
print(column_headers_w) # This prints this line into text to make the output easier to understand
print(random.choice(
watched_movies)) # This prints a single random output from all the values in the watched columns.
# determine whether user is satisfied with the randomly generated selection
satisfied_input_w = str(input(
"\nAre you satisfied with this result?\nPlease enter Y or N:\n(At any time type X to exit the program)\n")).upper()
        # if statements to determine whether the code should run again or not. (Note that continue here will only generate a movie in the U or W category, whichever was selected at the start).
if satisfied_input_w == "Y":
running = False
elif satisfied_input_w == "N":
continue
elif satisfied_input_w == "X":
running = False
else:
print(
"I didn't quite catch that. Would you like to watch a movie you've seen before or a new movie? Please entier W or U:\n")
elif watched_or_unwatched_prompt == "U":
# grab the sheet info from the API
result_u = sheet.values().get(spreadsheetId=config.config_stuff['SAMPLE_SPREADSHEET_ID'],
range="Movies!A:F").execute()
# build that into a list of lists
unwatched_movies = result_u.get('values', [])
# make the program seem more retro and realistic
print("Generating random unwatched movie, please wait...\n")
# sleeps make people think the program works better, believe it or not. lol
time.sleep(3)
print(column_headers_u) # This prints this line into text to make the output easier to understand
print(random.choice(
unwatched_movies)) # This prints a single random output from all the values in the unwatched columns
# determine whether user is satisfied with the randomly generated selection
satisfied_input_u = str(input(
"\nAre you satisfied with this result?\nPlease enter Y or N:\n(At any time type X to exit the program)\n")).upper()
        # if statements to determine whether the code should run again or not. (Note that continue here will only generate a movie in the U or W category, whichever was selected at the start).
if satisfied_input_u == "Y":
running = False
elif satisfied_input_u == "N":
continue
elif satisfied_input_u == "X":
running = False
else:
print(
"I didn't quite catch that. Would you like to watch a movie you've seen before or a new movie? Please entier W or U:\n")
# finish the program / break the loop.
elif watched_or_unwatched_prompt == "X":
running = False
# tell the user they are a moron for not inputting the one of only 2 options they had available to them.
else:
print(
"I didn't quite catch that. Would you like to watch a movie you've seen before or a new movie? Please entier W or U:\n")
continue
# just so the user knows for sure...
print("Program finished.")
| Voltaic314/Movie-Picker-For-Google-Sheets-In-Python | Movie-Picker.py | Movie-Picker.py | py | 5,447 | python | en | code | 0 | github-code | 13 |
2181041381 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import networkx as nx
import random
import numpy as np
import time
from TriangulationAlgorithms import TriangulationAlgorithm as ta
def triangulate_LexM(G, randomized=False, repetitions=1, reduce_graph=True, timeout=-1):
algo = Algorithm_LexM(G, reduce_graph, timeout)
if not randomized:
algo.run()
return {
"H" : algo.get_triangulated(),
"size" : len(algo.get_triangulation_edges()),
"alpha" : algo.get_alpha(),
"mean" : len(algo.get_triangulation_edges()),
"variance" : 0,
"repetitions" : 1
}
else:
H_opt = None
alpha_opt = None
size_opt = None
all_sizes = []
for i in range(repetitions):
algo.run_randomized()
all_sizes.append(len(algo.get_triangulation_edges()))
if H_opt == None or len(algo.get_triangulation_edges()) < size_opt:
H_opt = algo.get_triangulated()
alpha_opt = algo.get_alpha()
size_opt = len(algo.get_triangulation_edges())
return {
"H" : H_opt,
"size" : size_opt,
"alpha" : alpha_opt,
"mean" : np.mean(all_sizes),
"variance" : np.var(all_sizes),
"repetitions" : repetitions
}
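# Usage sketch (not from the original repository; assumes any networkx graph):
#   import networkx as nx
#   G = nx.cycle_graph(5)                     # a chordless cycle needs fill-in edges
#   res = triangulate_LexM(G, randomized=True, repetitions=5)
#   H, alpha = res["H"], res["alpha"]         # chordal supergraph and elimination ordering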
class Algorithm_LexM(ta.TriangulationAlgorithm):
'''
Args:
		G : a graph in networkx format
randomize : if set to True, the order in which the nodes are processed is randomized
Returns:
H : a minimal triangulation of G.
alpha : the corresponding minimal elimination ordering of G
'''
def __init__(self, G, reduce_graph=True, timeout=-1):
logging.info("=== LexM.Algorithm_LexM.init ===")
super().__init__(G, reduce_graph, timeout)
self.alpha = {}
def get_alpha(self):
return self.alpha
def triangulate(self, C, randomized=False):
'''
Implementation of LEX M Algorithm
Rose, Tarjan, Lueker: Algorithmic Aspects of Vertex Elimination on Graphs
https://epubs.siam.org/doi/abs/10.1137/0205021
		to construct a minimal elimination ordering alpha of a graph G
and the corresponding minimal triangulation H(G, alpha)
Args:
C : a graph in networkx format
randomized : if true, the algorithm get_maxlex_node is randomized.
Returns:
			F : a set of edges s.t. C + F is a minimal triangulation of C.
'''
logging.info("=== triangulate_LEX_M ===")
F = []
n = len(C)
nodelabels = {node : [] for node in C}
all_unnumbered_vertices = [n for n in C if n not in self.alpha]
if randomized:
random.shuffle(all_unnumbered_vertices)
for i in range(n,0, -1):
# check timeout:
if self.timeout > 0 and time.time() > self.timeout:
raise ta.TimeLimitExceededException("Time Limit Exceeded!")
logging.debug("Iteration: "+str(i))
node_v = self.get_maxlex_node(C, nodelabels, randomized)
logging.debug("max lex node: "+str(node_v))
self.alpha[node_v] = i
all_unnumbered_vertices.remove(node_v)
S = []
logging.debug("all unnumbered nodes:")
logging.debug([str(n)+": "+str(nodelabels[n]) for n in all_unnumbered_vertices])
for node_u in all_unnumbered_vertices:
smallerlex_nodes = [n for n in all_unnumbered_vertices if list_lexicographic_is_less_than(nodelabels[n], nodelabels[node_u])]+[node_v, node_u]
logging.debug("start Node "+str(node_v)+" label: "+str(nodelabels[node_v]))
logging.debug("target Node "+str(node_u)+" label: "+str(nodelabels[node_u]))
if nx.has_path(C.subgraph(smallerlex_nodes),node_v, node_u):
logging.debug("Add target node "+str(node_u)+" to set S")
S.append(node_u)
for node_u in S:
nodelabels[node_u].append(i)
if (node_v, node_u) not in C.edges():
F.append((node_v, node_u))
logging.debug("added edge: "+str((node_v, node_u)))
logging.debug("End of iteration. all node labels:")
logging.debug([str(n)+": "+str(nodelabels[n]) for n in C])
return F
def get_maxlex_node(self, G, nodelabels, randomized=False):
'''
		Get an unnumbered vertex v of lexicographically maximum label from G
Args:
G : a graph in networkx format
randomized : if set to True and if there are multiple nodes with the max lex. label, one of these is returned at random
Returns:
			v : an unnumbered vertex v of lexicographically maximum label from G
'''
logging.info("=== get_maxlex_node ===")
current_max_label = ''
current_best_node = None
nodes = [n for n in G]
if randomized:
random.shuffle(nodes)
for node in G:
if (node not in self.alpha) and ((current_best_node == None) or (list_lexicographic_is_less_than(current_max_label, nodelabels[node]))):
current_best_node = node
current_max_label = nodelabels[node]
return current_best_node
def list_lexicographic_is_less_than(list_1, list_2):
'''
computes a lexicographic ordering relation of two lists
if list_1 < list_2 returns True
otherwise false
Args:
list_1 : a list of integers
list_2 : a list of integers
Return:
True, if list_1 < list_2 as defined above, otherwise False
'''
#logging.info("=== list_lexicographic_is_less_than ===")
n = min(len(list_1), len(list_2))
for i in range(n):
if list_1[i] < list_2[i]:
return True
elif list_1[i] > list_2[i]:
return False
if len(list_1) < len(list_2):
return True
elif len(list_1) > len(list_2):
return False
return False
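# For example, [] < [1], [1, 2] < [1, 3] and [1, 2] < [1, 2, 5] under this ordering.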
| Feathergunner/Triangulation | TriangulationAlgorithms/LEX_M.py | LEX_M.py | py | 5,214 | python | en | code | 2 | github-code | 13 |
14646334595 | from sqlalchemy import Column, Identity, Integer, String, Table
from . import metadata
PaymentMethodDetailsLinkJson = Table(
"payment_method_details_linkjson",
metadata,
Column(
"country",
String,
comment="Two-letter ISO code representing the funding source country beneath the Link payment.\nYou could use this attribute to get a sense of international fees",
nullable=True,
),
Column("id", Integer, primary_key=True, server_default=Identity()),
)
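# Usage sketch (not part of the generated module; assumes `metadata` is bound to an engine elsewhere):
#   from sqlalchemy import select
#   stmt = select(PaymentMethodDetailsLinkJson).where(
#       PaymentMethodDetailsLinkJson.c.country == "US"
#   )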
__all__ = ["payment_method_details_link.json"]
| offscale/stripe-sql | stripe_openapi/payment_method_details_link.py | payment_method_details_link.py | py | 550 | python | en | code | 1 | github-code | 13 |
74843556177 |
import re
import bpy
import numpy as np
from mathutils import Matrix
from . import faceit_utils as futils
from . import fc_dr_utils
def apply_matrix_to_all_mesh_data(mesh_data, matrix):
'''Apply a matrix to all mesh data'''
# Apply matrix to mesh data
mesh_data = np.matmul(mesh_data, matrix.to_3x3().transposed())
mesh_data += matrix.translation
return mesh_data
def get_mesh_data(obj, dg=None, evaluated=True):
'''Get evaluated or basis shape data'''
if evaluated:
verts = obj.evaluated_get(dg).data.vertices
else:
verts = obj.data.vertices
vert_count = len(verts)
data = np.zeros(vert_count * 3, dtype=np.float32)
verts.foreach_get('co', data.ravel())
data = data.reshape(vert_count, 3)
return data
def has_shape_keys(obj):
'''Returns True when the object data holds Shape Keys'''
if hasattr(obj.data, 'shape_keys'):
return hasattr(obj.data.shape_keys, 'key_blocks')
else:
return False
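# The two helpers below only ever widen a shape key's slider range: by default the
# current min/max is kept whenever it already covers the requested value.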
def set_slider_max(shape_key, value, highest_value=True):
if highest_value:
if value < shape_key.slider_max:
return
shape_key.slider_max = max(max(shape_key.slider_min + 0.001, 1.0), value)
def set_slider_min(shape_key, value, lowest_value=True):
if lowest_value:
if value > shape_key.slider_min:
return
shape_key.slider_min = min(min(shape_key.slider_max - 0.001, 0.0), value)
def set_rest_position_shape_keys(objects=None, expressions_filter=None) -> None:
'''Set all shape keys to default 0.0 value'''
auto_key = bpy.context.scene.tool_settings.use_keyframe_insert_auto
bpy.context.scene.tool_settings.use_keyframe_insert_auto = False
if objects is None:
objects = futils.get_faceit_objects_list()
for obj in objects:
if has_shape_keys(obj):
if expressions_filter:
for sk in obj.data.shape_keys.key_blocks:
if sk.name in expressions_filter:
sk.value = 0.0
else:
for sk in obj.data.shape_keys.key_blocks:
sk.value = 0
bpy.context.scene.tool_settings.use_keyframe_insert_auto = auto_key
def get_enum_shape_key_actions(self, context):
global actions
actions = []
# for a in get_all_shape_key_actions():
for a in bpy.data.actions:
if any(['key_block' in fc.data_path for fc in a.fcurves]) or not a.fcurves:
actions.append((a.name,) * 3)
if not actions:
actions.append(("None", "None", "None"))
return actions
def get_all_shape_key_actions():
'''Return available shape key actions in the blendfile'''
global actions
actions = []
for a in bpy.data.actions:
if any(['key_block' in fc.data_path for fc in a.fcurves]) or not a.fcurves:
actions.append(a)
return actions
def get_shape_key_names_from_objects(objects=None) -> list:
shape_key_names = []
if not objects:
objects = futils.get_faceit_objects_list()
for obj in objects:
if has_shape_keys(obj):
shape_key_names.extend([sk.name for sk in obj.data.shape_keys.key_blocks if sk.name != 'Basis'])
return list(set(shape_key_names))
def get_shape_key_names_from_action(action):
shape_key_names = []
for fc in action.fcurves:
if fc.is_empty:
continue
dp = str(fc.data_path)
if 'key_blocks' in dp:
found_shapes = re.findall(r"['\"](.*?)['\"]", dp)
if found_shapes:
for shape in found_shapes:
shape_key_names.append(shape)
return shape_key_names
def get_shape_keys_from_faceit_objects_enum(self, context):
'''Returns a items list to be used in EnumProperties'''
# blender is prone to crash without making shapes global
global shapes
shapes = []
if context is None:
print('get_shape_keys_from_faceit_objects_enum --> Context is None')
return shapes
faceit_objects = futils.get_faceit_objects_list()
if faceit_objects:
shape_key_names = get_shape_key_names_from_objects(faceit_objects)
for i, name in enumerate(shape_key_names):
shapes.append((name, name, name, i))
else:
shapes.append(("None", "None", "None"))
return shapes
def store_shape_keys(obj):
'''
Store all shapekeys data in numpy arrays
Returns a dict that holds (data as np array and meta properties (value, relative_key etc.))
'''
sk_dict = {}
if not has_shape_keys(obj):
return sk_dict
vert_count = len(obj.data.vertices)
src_shape_keys = obj.data.shape_keys
i = 0
for sk in src_shape_keys.key_blocks[1:]:
# numpy array with shapekey data
sk_shape_data = np.zeros(vert_count * 3, dtype=np.float32)
sk.data.foreach_get('co', sk_shape_data.ravel())
# Get driver
stored_drivers = []
if src_shape_keys.animation_data:
for dr in src_shape_keys.animation_data.drivers:
if 'key_blocks["{}"].'.format(sk.name) in dr.data_path:
stored_drivers.append(fc_dr_utils.copy_driver_data(dr))
sk_dict[sk.name] = {
'data': sk_shape_data,
'drivers': stored_drivers,
'value': sk.value,
'mute': sk.mute,
'relative_key': sk.relative_key,
'slider_min': sk.slider_min,
'slider_max': sk.slider_max,
'vertex_group': sk.vertex_group,
'interpolation': sk.interpolation,
'index': i,
}
i += 1
return sk_dict
def apply_stored_shape_keys(obj, sk_dict, new_order_list=None, apply_drivers=True):
''' Apply the saved shapekey data from @sk_dict to objects shapekeys in new order from @export_order_dict '''
# The new index for the shapekey with name shapekey_name
relative_key = None
if not has_shape_keys(obj):
relative_key = obj.shape_key_add(name='Basis')
# Apply all shape keys in reorder list first. Then others
reordered_shapes = []
# New order dict: {sk_name, index}
if new_order_list:
for shapekey_name in new_order_list:
sk_data = sk_dict.get(shapekey_name)
if sk_data:
reordered_shapes.append(shapekey_name)
apply_shape_key_from_data(obj, shapekey_name, sk_data, relative_key, apply_drivers=apply_drivers)
else:
                print('cannot apply the order because the shape key {} has not been found.'.format(shapekey_name))
for shapekey_name, sk_data in sk_dict.items():
if shapekey_name not in reordered_shapes:
apply_shape_key_from_data(obj, shapekey_name, sk_data, relative_key, apply_drivers=apply_drivers)
def apply_shape_key_from_data(obj, shapekey_name, sk_data, relative_key, apply_drivers=True):
''' Create a new Shape Key and populate the stored data/properties/drivers '''
    if shapekey_name in obj.data.shape_keys.key_blocks:
print('The Shape Key {} already exists.'.format(shapekey_name))
return
new_sk = obj.shape_key_add(name=shapekey_name)
    # Load the shape data
sk_shape_data = sk_data['data']
new_sk.data.foreach_set('co', sk_shape_data.ravel())
    # Load the meta data (value, slider range, relative key, etc.)
new_sk.slider_min = sk_data.get('slider_min', new_sk.slider_min)
new_sk.slider_max = sk_data.get('slider_max', new_sk.slider_max)
new_sk.mute = sk_data.get('mute', new_sk.mute)
new_sk.value = sk_data.get('value', new_sk.value)
new_sk.relative_key = relative_key or sk_data.get('relative_key', new_sk.relative_key)
new_sk.vertex_group = sk_data.get('vertex_group', new_sk.vertex_group)
new_sk.interpolation = sk_data.get('interpolation', new_sk.interpolation)
if apply_drivers:
stored_drivers = sk_data.get('drivers')
if stored_drivers:
for sk_driver_dict in stored_drivers:
# Value will be replaced in populate_driver_data/populate_fcurve
dr = new_sk.driver_add('value', -1)
fc_dr_utils.populate_driver_data(sk_driver_dict, dr)
def apply_all_shape_keys(obj):
apply_shape_sk = obj.shape_key_add(name='temp_sk', from_mix=True)
shape_keys = obj.data.shape_keys.key_blocks
for _ in range(len(shape_keys)):
if shape_keys[0].name != apply_shape_sk.name:
obj.shape_key_remove(shape_keys[0])
obj.shape_key_clear()
def remove_all_sk_apply_basis(obj, apply_basis=True):
key_blocks = obj.data.shape_keys.key_blocks
for sk in range(len(key_blocks))[::-1]:
if len(key_blocks) > (1 if apply_basis else 0):
last_sk = obj.data.shape_keys.key_blocks[-1]
obj.shape_key_remove(last_sk)
if apply_basis:
apply_all_shape_keys(obj)
| V-Sekai/V-Sekai.blender-game-tools | addons/faceit/core/shape_key_utils.py | shape_key_utils.py | py | 9,173 | python | en | code | 7 | github-code | 13 |
3423452850 | import streamlit as st
from st_aggrid import AgGrid, GridOptionsBuilder
from st_aggrid.shared import GridUpdateMode, DataReturnMode
from st_aggrid.shared import JsCode
import pandas as pd
import pickle
import os
from glob import glob
try:
import config
except ImportError:
from ntld import config
LOCATIONS = tuple(sorted(config.LOCATIONS))
DATASETS = {
'customers': 'customers_training.csv',
'last daily transactions': 'daily_sequence_training.csv',
'monthly rolling stats': 'rolling_stats.csv',
'blank periods': 'training_blank_periods.csv'
}
def list_datasets(location):
location = location.lower().strip()
path = os.path.join(config.PATH_TO_JOBS, location, 'data', '*.csv')
files = list(map(lambda x: os.path.basename(x), glob(path)))
return files
def check_model_availability(location):
path = os.path.join(config.PATH_TO_JOBS, location, 'models', 'rules_models.pkl')
return os.path.isfile(path)
def load_dataset(location: str, dataset: str):
assert location in LOCATIONS
assert dataset in DATASETS.keys()
path = os.path.join(config.PATH_TO_JOBS, location, 'data', DATASETS[dataset])
return pd.read_csv(path)
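# Render `df` as an interactive Ag-Grid table (sidebar filters, grouping, checkbox
# row selection) and return the grid response; response["selected_rows"] holds the
# rows the user ticked.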
def aggrid_interactive_table(df: pd.DataFrame):
options = GridOptionsBuilder.from_dataframe(
df, enableRowGroup=True, enableValue=True, enablePivot=True
)
options.configure_side_bar()
options.configure_selection(selection_mode="multiple", use_checkbox=True)
gridOptions = options.build()
response = AgGrid(
df,
gridOptions=gridOptions,
enable_enterprise_modules=True,
update_mode=GridUpdateMode.MODEL_CHANGED,
data_return_mode=DataReturnMode.FILTERED_AND_SORTED,
fit_columns_on_grid_load=False,
reload=True
)
# selection = AgGrid(
# df,
# enable_enterprise_modules=True,
# gridOptions=gridOptions,
# theme="light",
# update_mode=GridUpdateMode.MODEL_CHANGED,
# allow_unsafe_jscode=True,
# )
return response
st.cache(suppress_st_warning=True)
def main():
html_temp = """
<div style ="background-color:yellow;padding:13px">
<h1 style ="color:black;text-align:center;">NTLD ML</h1>
</div>
"""
st.markdown(html_temp, unsafe_allow_html = True)
Location = st.selectbox('Location', LOCATIONS)
Dataset = st.selectbox('dataset', tuple(DATASETS.keys()))
if st.button('LoadData'):
st.write(f"location: {Location}")
st.write(f"dataset: {Dataset}")
st.write(f"rules model available: {check_model_availability(Location)}")
data = load_dataset(Location, Dataset)
response = aggrid_interactive_table(df=data)
st.subheader("Filtered data will appear below 👇 ")
st.text("")
if response:
df = pd.DataFrame(response["selected_rows"])
st.table(df)
if __name__=='__main__':
main() | kthouz/streamlit_app | app.py | app.py | py | 2,957 | python | en | code | 0 | github-code | 13 |
14412323510 | # This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import bpy
class ObjectButtonsPanel:
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "physics"
@classmethod
def poll(cls, context):
return context.object and context.scene.render.engine == "PLASMA_GAME"
class BlenderObjectSearchPanel(ObjectButtonsPanel, bpy.types.Panel):
bl_label = ""
bl_options = {"HIDE_HEADER"}
def draw(self, context):
# Yes, this is stolen shamelessly from bl_ui
layout = self.layout
space = context.space_data
if space.use_pin_id:
layout.template_ID(space, "pin_id")
else:
row = layout.row()
row.template_ID(context.scene.objects, "active")
class PlasmaObjectPanel(ObjectButtonsPanel, bpy.types.Panel):
bl_label = "Plasma Object"
def draw_header(self, context):
self.layout.prop(context.object.plasma_object, "enabled", text="")
def draw(self, context):
layout = self.layout
pl_obj = context.object.plasma_object
pl_age = context.scene.world.plasma_age
layout.active = pl_obj.enabled
        # It is an error to put objects in the wrong types of pages.
active_page = next((i for i in pl_age.pages if i.name == pl_obj.page), None)
is_external_page = active_page.page_type == "external" if active_page else False
# Which page does this object go in?
# If left blank, the exporter puts it in page 0 -- "Default"
layout.alert = is_external_page
layout.prop_search(pl_obj, "page", pl_age, "pages", icon="BOOKMARKS")
layout.alert = False
if is_external_page:
layout.label("Objects cannot be exported to External pages.", icon="ERROR")
class PlasmaNetPanel(ObjectButtonsPanel, bpy.types.Panel):
bl_label = "Plasma Synchronization"
bl_options = {"DEFAULT_CLOSED"}
def draw_header(self, context):
self.layout.prop(context.object.plasma_net, "manual_sdl", text="")
def draw(self, context):
layout = self.layout
pl_net = context.object.plasma_net
layout.active = pl_net.manual_sdl
for i in sorted(pl_net.sdl_names):
layout.prop(pl_net, i)
| H-uru/korman | korman/ui/ui_object.py | ui_object.py | py | 2,883 | python | en | code | 31 | github-code | 13 |
38474973195 | # 1
import math
def n_queen(n: int) -> [[int]]:
def helper(row):
if row == n:
result.append(list(col_placement))
return
else:
for col in range(n):
if all(abs(col - c) not in (0, row - i) for i, c in enumerate(col_placement[:row])):
col_placement[row] = col
helper(row + 1)
result: [[int]] = []
col_placement: [int] = [0] * n
# start with first row
helper(0)
return result
# print(n_queen(4))
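# Expected output if the call above is uncommented (column index per row):
# [[1, 3, 0, 2], [2, 0, 3, 1]]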
# 2
# O(n*n!) - there are n! permutations for n elements
def allpermutations(A: [int]) -> [[int]]:
def helper(i: int) -> None:
if i == len(A):
result.append(A.copy())
for j in range(i, len(A)):
A[i], A[j] = A[j], A[i]
helper(i + 1)
A[i], A[j] = A[j], A[i]
result: [[int]] = []
helper(0)
return result
# A = [2, 3, 5, 7]
# p = allpermutations(A)
# print(len(p), p)
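# Expected: 24 permutations (4!) of [2, 3, 5, 7], the first one being the original order.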
# 3
# time - O(n)
# space - O(1)
def next_biggest(A: [int]) -> None:
inflectionpoint = -1
i: int = len(A) - 2
# longest non-decreasing sub array from last
while i >= 0:
if A[i] >= A[i + 1]:
i -= 1
else:
inflectionpoint = i
break
if inflectionpoint == -1:
return None
# replace the inflection point item with the smallest item greater than it in the non-decreasing sub array from last
i = len(A) - 1
while i > inflectionpoint:
if A[i] > A[inflectionpoint]:
A[i], A[inflectionpoint] = A[inflectionpoint], A[i]
break
else:
i -= 1
# reverse items after inflection point
A[inflectionpoint + 1:] = reversed(A[inflectionpoint + 1:])
return A
# A= [1,0,3,2]
# print(next_biggest(A))
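# Expected output for the example above: [1, 2, 0, 3]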
def allpermutations2(A: [int]) -> [[int]]:
result = []
while True:
result.append(A.copy())
A = next_biggest(A)
if not A:
break
return result
# A = [2, 3, 5, 7]
# p = allpermutations2(A)
# print(len(p), p)
# 4
def all_subsequence(A: [int]) -> [[int]]:
n: int = len(A)
def helper(i: int = 0, current: [int] = []) -> None:
if i == n:
if len(current) > 0:
arrays.append(current)
else:
# does not include current element
helper(i + 1, current)
# include current element
helper(i + 1, current + [A[i]])
arrays: [[int]] = []
helper()
return arrays
# print(all_subsequence([1, 2, 3]))
def all_subsequence(A: [int]) -> [[int]]:
n = len(A)
power_set = []
    # number of subsequences (size of the power set) = 2^n
# 1<<n == 2^n
for i in range(1 << n):
bit_array = i
subset = []
while bit_array:
print(bit_array, ~(bit_array - 1), bit_array & ~(bit_array - 1),
int(math.log2(bit_array & ~(bit_array - 1))))
subset.append(A[int(math.log2(bit_array & ~(bit_array - 1)))])
bit_array &= bit_array - 1
power_set.append(subset)
print("=================")
return power_set
print(all_subsequence([7, 8, 6]))
| bkgsur/Algo | prep/recursion.py | recursion.py | py | 3,158 | python | en | code | 0 | github-code | 13 |
70776602578 | import pandas as pd
import xarray as xr
import numpy as np
import geopandas as gpd
import plotly.express as px
import plotly.graph_objects as go
from shapely.geometry import Point
from geopandas import GeoDataFrame
from shapely.ops import nearest_points
from shapely.geometry import MultiPoint
######################################################################
class choropleth_function:
# Choropleth class function
######################################################################
def __init__(self,
variable_input, variable_name, unit, threshold, # meteorological variables
start_date, end_date, years_number, # temporal variables
text_description, period, legend, filename, # description variables
text_marker, location_marker, list_of_latitudes, list_of_longitudes, color_list, marker_size,# marker variables
color_continuous_scale) :
# Variables will be chosen by the user in the input file
self.variable_input = variable_input
self.variable_name = variable_name
self.unit = unit
self.threshold = threshold
self.start_date = start_date
self.end_date = end_date
self.years_number = years_number
self.period = period
self.legend = legend
self.filename = filename
self.location_marker = location_marker
self.list_of_latitudes = list_of_latitudes
self.list_of_longitudes = list_of_longitudes
self.color_list = color_list
self.text_description = text_description
self.text_marker = text_marker
self.marker_size = marker_size
self.color_continuous_scale = color_continuous_scale
######################################################################
def map_division(self):
        # Choosing the .json file where the data will be displayed (here we only have european ones for personal purposes)
while True:
            geo_data = input(">>> Choose a european map division: C (Countries) / R (Regions) / ER (Enlarged Regions) = ")
# User is choosing between 3 given choice of map division
if geo_data == 'C':
# European Countries division
geo_data_used = gpd.read_file('europe_countries.geojson')
geo_data_used = geo_data_used.set_index(['UN']).sort_values(by = ['UN'])
geo_data_used_without_index = geo_data_used.reset_index()
print("Your choice has been successfully saved.")
return geo_data_used, geo_data_used_without_index
if geo_data == 'R':
# European Regions division
geo_data_used = gpd.read_file('europe_nutsrg_2.json')
geo_data_used = geo_data_used.sort_values(by = ['id']).set_index(['id'])
geo_data_used_without_index = geo_data_used.reset_index()
print("Your choice has been successfully saved.")
return geo_data_used, geo_data_used_without_index
if geo_data == 'ER':
# Enlarged European Regions division
geo_data_used = gpd.read_file('europe_nutsrg_1.json')
geo_data_used = geo_data_used.sort_values(by = ['id']).set_index(['id'])
geo_data_used_without_index = geo_data_used.reset_index()
print("Your choice has been successfully saved.")
return geo_data_used, geo_data_used_without_index
else :
print('Error: Please answer C or R or ER to the previous question.')
######################################################################
def open_file(self):
# NetCDF Files opening sliced by start date, end date, european latitude and longitude
print("> Files opening in progress...")
ds = xr.open_mfdataset(self.filename, autoclose=True)
ds = ds.sel(time = slice(self.start_date, self.end_date), rlat=slice(-15, 20), rlon=slice(-20,15))
ds = ds.load() # Loading sliced files in RAM
print("> Files successfully opened.")
return ds
######################################################################
def files_location_points(self, ds):
# Extraction of longitude and latitude of netCDF files
print('> Files location points extraction in progress...')
df= pd.DataFrame()
lon = ds.lon.values
lat = ds.lat.values
rlat_list, rlon_list, lat_list, lon_list =[], [], [], []
for x, y in [(x,y) for x in ds.rlat.values for y in ds.rlon.values]:
# Loop will extract longitude and latitude values given in NetCDF files
# Be aware that not all NetCDF files contain rlon and rlat
rlat_list.append(x)
rlon_list.append(y)
local_ds=ds.sel(rlat=x,rlon=y)
lat_list.append(local_ds.lat.values)
lon_list.append(local_ds.lon.values)
df = pd.DataFrame(
# Dataframe will contain all values of lat, lon, rlat and rlon given in sliced NetCDF
{
"lat": lat_list,
"lon": lon_list,
"rlat": rlat_list,
"rlon" : rlon_list
})
print("> Location points successfully extracted from files.")
return df
######################################################################
def european_area(self, df):
# Extraction of file points on european area (!!!Missing Russia and Iceland!!!)
print('> European location points extraction in progress...')
geometry = [Point(xy) for xy in zip(df.lon, df.lat)]
df_tmp = df.drop(['rlon', 'rlat', 'lon', 'lat'], axis = 1)
gdf = GeoDataFrame(df, crs = "EPSG:4326", geometry = geometry)
# Create a geodataframe with all given latitudes and longitudes in df
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
bords_europe = world[world.continent=="Europe"]
bords_europe = bords_europe[(bords_europe.name!="Russia") & (bords_europe.name!= "Iceland")].drop(['pop_est', 'continent','name','iso_a3','gdp_md_est'], axis = 1)
# Save all POINTS in Europe area
initial_european_data = gpd.sjoin(gdf, bords_europe, op = "within")
initial_european_data = initial_european_data.drop(['index_right'], axis = 1)
# Merge the two previous geodataframe to keep only the european POINTS for which we have data
print("> European location points successfully extracted.")
return initial_european_data
######################################################################
def temperature_max_computation(self, ds, initial_european_data):
# Number of days with temperature above the threshold computation on temporal range (by POINTS)
print("> Temperature days computation on geographical range in progress...")
SummList = []
for i in range(len(initial_european_data)):
# Loop will search for every POINT (rlat, rlon) the corresponding data
vals = ds.sel(rlat = initial_european_data['rlat'].iloc[i], rlon = initial_european_data['rlon'].iloc[i])[self.variable_name].values
su = [1 if v - 273.15 >= self.threshold else 0 for v in vals]
# We aim at counting the days that respect the threshold given by the user
            # Temperatures in the files are in Kelvin, so we subtract 273.15 to compare them in degrees Celsius
SummList.append(sum(su) / self.years_number)
            # This is the average number of days per year that respect the threshold given
initial_european_data[self.legend] = SummList
# Saving the list of data in final dataframe
print("> Temperature days successfully computed on geographical range.")
return initial_european_data
######################################################################
def temperature_min_computation(self, ds, initial_european_data):
        # Number of days with temperature below the threshold computation on temporal range (by POINTS)
print("> Temperature days computation on geographical range in progress...")
MinList = []
for i in range(len(initial_european_data)):
# Loop will search for every POINT (rlat, rlon) the corresponding data
vals = ds.sel(rlat = initial_european_data['rlat'].iloc[i], rlon = initial_european_data['rlon'].iloc[i])[self.variable_name].values
su = [1 if v - 273.15 <= self.threshold else 0 for v in vals]
# We aim at counting the days that respect the threshold given by the user
            # Temperatures in the files are in Kelvin, so we subtract 273.15 to compare them in degrees Celsius
MinList.append(sum(su) / self.years_number)
            # This is the average number of days per year that respect the threshold given
initial_european_data[self.legend] = MinList
# Saving the list of data in final dataframe
print("> Temperature days successfully computed on geographical range.")
return initial_european_data
######################################################################
def precipitation_computation(self, ds, initial_european_data):
# Number of days with precipitation above the threshold computation on temporal range (by POINTS)
print("> Precipitation days computation on geographical range in progress...")
PrecipList = []
for i in range(len(initial_european_data)):
# Loop will search for every POINT (rlat, rlon) the corresponding data
vals = ds.sel(rlat = initial_european_data['rlat'].iloc[i], rlon = initial_european_data['rlon'].iloc[i])[self.variable_name].values
su = [1 if v * 86400 >= self.threshold else 0 for v in vals]
# We aim at counting the days that respect the threshold given by the user
            # Precipitation flux is given in kg/m²/s, so we multiply by 86400 (seconds per day) to compare in mm/day
PrecipList.append(sum(su) / self.years_number)
            # This is the average number of days per year that respect the threshold given
initial_european_data[self.legend] = PrecipList
# Saving the list of data in final dataframe
print("> Precipitation days successfully computed on geographical range.")
return initial_european_data
######################################################################
def data_with_index(self, final_european_data, geo_data_used_without_index):
        # Mean of days per POLYGON
print('> Region data computation in progress...')
datum = []
# Initialized list
for ligne in range(len(geo_data_used_without_index)):
            # Loop will provide the mean data for each geographical division
pip_data = final_european_data.loc[final_european_data.within(geo_data_used_without_index.loc[ligne, 'geometry'])]
# Verify if POINT is contained in POLYGONS of the geodataframe that the user chose
datum.append(pip_data[self.legend].mean())
# Keep the mean of the POLYGON
data = pd.DataFrame(datum)
data['id'] = geo_data_used_without_index['id']
data['geometry'] = geo_data_used_without_index['geometry']
data[self.legend] = data[0]
data['na'] = geo_data_used_without_index['na']
data = data.sort_values(by =['id']).set_index(['id'])
print('> Region data successfully computed.')
return data
######################################################################
def data_without_index(self, data):
# Save dataframe without index (will be used choropleth function)
data_without_index = data.reset_index()
return data_without_index
######################################################################
def data_max(self, data):
# Save data maximum (in order to custom it on choropleth map)
return data [self.legend].max()
######################################################################
def choropleth_map_without_marker(self, data, geo_data_used, maxi):
# Write an html file for choropleth map (without marker)
print('> Map in creation...')
fig_without_marker = go.Figure(px.choropleth_mapbox(data_frame = data, # data by the region of geodataframe chosen
locations = data.index,
                                            geojson = geo_data_used,# geodataframe chosen
color = self.legend, # custom legend
hover_name = 'na',
title = self.legend, # custom legend
mapbox_style ='carto-positron',
center = {'lat':47, 'lon':6},
zoom = 3.2,
opacity = 1,
color_continuous_scale = self.color_continuous_scale,
range_color = [0, maxi]
))
fig_without_marker.update_geos(fitbounds = "locations")
fig_without_marker.update_layout(title_text = self.legend, title_x = 0.5)
fig_without_marker.write_html(f"{self.variable_name}_{self.period}_{self.threshold}_without_marker.html")
        # The output will be given a specific name corresponding to the variable choices
print('> Map created.')
######################################################################
def choropleth_map_with_markers(self, data, geo_data_used, maxi):
# Write an html file for choropleth map (with markers)
print('> Map in creation...')
fig_with_markers= go.Figure(
px.choropleth_mapbox(
data_frame = data, # data by the region of geodataframe chosen
locations = data.index,
geojson = geo_data_used, #geodataframe chosen
color = self.legend, # custom legend
hover_name = 'na',
title = self.legend, # custom legend
mapbox_style = 'carto-positron',
center = {'lat':47, 'lon':6},
zoom = 3.2,
opacity = 1,
color_continuous_scale = self.color_continuous_scale,
range_color = [0, maxi]
)
)
fig_with_markers.update_geos(fitbounds = "locations")
fig_with_markers.update_layout(title_text = self.legend, title_x = 0.5)
fig_with_markers.add_scattermapbox(
lat = self.list_of_latitudes,
lon = self.list_of_longitudes,
mode = 'markers+text',
text = self.text_marker,
below ='',
marker_size = self.marker_size,
marker_color = self.color_list
)
fig_with_markers.write_html(f"{self.variable_name}_{self.period}_{self.threshold}_with_markers.html")
        # The output will be given a specific name corresponding to the variable choices
print('> Map created.') | dorotheekar/choropleth-ipcc-projections | main.py | main.py | py | 14,852 | python | en | code | 1 | github-code | 13 |
35055076768 | from flask import Flask, render_template, request, redirect, url_for, flash
from flask_mysqldb import MySQL
app = Flask(__name__)
app.config['MYSQL_HOST'] = 'b64b8nqmxb1ttbufoxjg-mysql.services.clever-cloud.com'
app.config['MYSQL_USER'] = 'up1hh0qi2xsonjuq'
app.config['MYSQL_PASSWORD'] = 'a5nRziQvat1I7BeZ22np'
app.config['MYSQL_DB'] = 'b64b8nqmxb1ttbufoxjg'
mysql = MySQL(app)
@app.route('/')
def Index():
cur = mysql.connection.cursor()
cur.execute('SELECT pago.id_pago, servicio.nom_servicio, metodo_pago.nom_met_pago, pago.nom_cliente, pago.documento, pago.valor_pagar, pago.fecha_hora FROM pago JOIN servicio ON pago.id_servicio = servicio.id_servicio JOIN metodo_pago ON pago.id_met_pago = metodo_pago.id_met_pago')
data = cur.fetchall()
return render_template('index.html', Pagos=data)
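# The routes below complete the CRUD cycle for the `pago` table:
# create (add_venta), read (Index), update (update_venta), delete (delete_venta).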
@app.route('/add_venta', methods=['POST'])
def add_venta():
if request.method == 'POST':
id_servicio = request.form['id_servicio']
id_met_pago = request.form['id_met_pago']
nom_cliente = request.form['nom_cliente']
documento = request.form['documento']
valor_pagar = request.form['valor_pagar']
cur = mysql.connection.cursor()
cur.execute('INSERT INTO pago (id_servicio, id_met_pago, nom_cliente ,documento, valor_pagar, fecha_hora) VALUES (%s, %s, %s, %s, %s, CURRENT_TIMESTAMP)',
(id_servicio, id_met_pago, nom_cliente, documento, valor_pagar))
mysql.connection.commit()
return redirect(url_for('Index'))
@app.route('/edit_ventas/<id>')
def get_ventas(id):
cur = mysql.connection.cursor()
cur.execute('SELECT * FROM pago WHERE id_pago = {0}'.format(id))
data = cur.fetchall()
return render_template('edit.html', Pagos=data[0])
@app.route('/update/<id>', methods=['POST'])
def update_venta(id):
if request.method == 'POST':
id_servicio = request.form['id_servicio']
id_met_pago = request.form['id_met_pago']
nom_cliente = request.form['nom_cliente']
documento = request.form['documento']
valor_pagar = request.form['valor_pagar']
cur = mysql.connection.cursor()
cur.execute(""" UPDATE pago SET id_servicio = %s, id_met_pago = %s, nom_cliente = %s, documento = %s, valor_pagar = %s, fecha_hora = CURRENT_TIMESTAMP WHERE id_pago = {0} """.format(id),
(id_servicio, id_met_pago, nom_cliente, documento, valor_pagar))
mysql.connection.commit()
return redirect(url_for('Index'))
@app.route('/delete/<string:id>')
def delete_venta(id):
cur = mysql.connection.cursor()
cur.execute('DELETE FROM pago WHERE id_pago = {0}'.format(id))
mysql.connection.commit()
return redirect(url_for('Index'))
if __name__ == '__main__':
app.run(port=3306, debug=True)
| RONY4ALL/pago_servicio_eje3 | App.py | App.py | py | 2,862 | python | en | code | 0 | github-code | 13 |
72755437457 |
import numpy as np
from enum import Enum
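# Planar rotation matrix for angle q, used to express body-frame offsets in the world frame.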
def sRb(q):
sq, cq = np.sin(q), np.cos(q)
return np.array([
[cq, -sq],
[sq, cq]
])
class rphase(Enum):
""" enumerate for different phases in the jumping locomotion
"""
TD = 0 # touchdown
SQD = 1 # squat down
BOTTOM = 2 # bottom
STU = 3 # stand up
LO = 4 # liftoff
RIS = 5 # rising
TOP = 6 # top
FAL = 7 # falling
class rd:
def __init__(self,
MB=10.0,
MF=1.0,
IB=10.0,
IF=1.0,
L=1.3,
la=0.05,
c=0.3,
ks=1000.2,
kt=10000.0,
x0s=1.0,
la_min = 0.1,
la_max = 0.5,
):
self.time_elapsed = 0.0
self.dt = 0.001
self.params = [MB, MF, IB, IF, L, la, c, ks, kt, x0s]
self.g = 9.81
self.damping = 1e-5
self.Th, self.dTh, self.ddTh = [], [], []
self.contact_point = None
self.status = None
self.qTD, self.thLO = 0.0, 0.0
self.la_min, self.la_max = la_min, la_max
self.la_tar, self.la0, self.la_t0 = 0.05, 0.0, 0.0
self.dxD, self.x_lengthen = 0.0, 0.02
self.test = 0.0
def set_state(self, state):
self.Th = np.array(state[:5])
self.dTh = np.array(state[5:])
@property
def state(self):
        return np.r_[self.Th, self.dTh]
def model(self, state, t=None, tau=0):
MB, MF, IB, IF, L, la, c, ks, kt, x0s = self.params
g, damping = self.g, self.damping
x, y, th, q, l = self.Th
dx, dy, dth, dq, dl = self.dTh
cq, sq, cth, sth = np.cos(q), np.sin(q), np.cos(th), np.sin(th)
cqth, sqth = np.cos(q+th), np.sin(q+th)
k = ks if (l < x0s) and (l > 0.0) else kt
M = np.array([
[MB+MF,0,MF*(c*cth+cqth*(-(L/2)+la+l)),MF*cqth*(-(L/2)+la+l),MF*sqth],
[0,MB+MF,MF*(c*sth+(-(L/2)+la+l)*sqth),MF*(-(L/2)+la+l)*sqth,-MF*cqth],
[MF*(c*cth+cqth*(-(L/2)+la+l)),MF*(c*sth+(-(L/2)+la+l)*sqth),IB+IF+MF*(c*cth+cqth*(-(L/2)+la+l))**2+MF*(c*sth+(-(L/2)+la+l)*sqth)**2,IF+(L**2*MF)/4-L*la*MF+la**2*MF-1/2*c*(L-2*la)*MF*cq-MF*(L-2*la-c*cq)*l+MF*l**2,c*MF*sq],
[MF*cqth*(-(L/2)+la+l),MF*(-(L/2)+la+l)*sqth,IF+(L**2*MF)/4-L*la*MF+la**2*MF-1/2*c*(L-2*la)*MF*cq-MF*(L-2*la-c*cq)*l+MF*l**2,IF+1/4*(L-2*la)**2*MF-(L-2*la)*MF*l+MF*l**2,0],
[MF*sqth,-MF*cqth,c*MF*sq,0,MF]
], dtype=np.float)
h = np.array([
1/2*MF*(-2*c*sth*dth**2+cqth*(4*dl*dq+4*dl*dth)+sqth*(L*dq**2-2*la*dq**2-2*l*dq**2+2*L*dq*dth-4*la*dq*dth-4*l*dq*dth+L*dth**2-2*la*dth**2-2*l*dth**2)),
1/2*(2*g*MB+2*g*MF+2*c*MF*cth*dth**2+sqth*(4*MF*dl*dq+4*MF*dl*dth)+cqth*(-L*MF*dq**2+2*la*MF*dq**2+2*MF*l*dq**2-2*L*MF*dq*dth+4*la*MF*dq*dth+4*MF*l*dq*dth-L*MF*dth**2+2*la*MF*dth**2+2*MF*l*dth**2)),
1/2*MF*(2*c*g*sth+(-g*L+2*g*la+2*g*l)*sqth-2*L*dl*dq+4*la*dl*dq+4*l*dl*dq-2*L*dl*dth+4*la*dl*dth+4*l*dl*dth+cq*(4*c*dl*dq+4*c*dl*dth)+sq*(c*L*dq**2-2*c*la*dq**2-2*c*l*dq**2+2*c*L*dq*dth-4*c*la*dq*dth-4*c*l*dq*dth)),
-(1/2)*MF*(L-2*la-2*l)*(g*sqth+2*dl*dq+2*dl*dth+c*sq*dth**2),
1/2*(-2*k*x0s-2*g*MF*cqth+2*k*l+L*MF*dq**2-2*la*MF*dq**2-2*MF*l*dq**2+2*L*MF*dq*dth-4*la*MF*dq*dth-4*MF*l*dq*dth+L*MF*dth**2-2*la*MF*dth**2-2*c*MF*cq*dth**2-2*MF*l*dth**2),
], dtype=np.float)
# damping
bs = 0.00001 if (l < x0s) and (l > 0.0) else 125.0
d = np.array([
[damping, 0, 0, 0, 0],
[0, damping, 0, 0, 0],
[0, 0, damping, 0, 0],
[0, 0, 0, damping, 0],
[0, 0, 0, 0, bs]
], dtype=np.float)
# contact
peef = np.array([
c*sth+(la+l)*sqth+x,
-c*cth-cqth*(+la+l)+y,
], dtype=np.float)
if peef[1] < 0:
if self.contact_point is None:
self.contact_point = np.array([peef[0], 0.0])
Jeef = np.array([
[1,0,c*cth+cqth*(la+l),cqth*(la+l),sqth],
[0,1,c*sth+(la+l)*sqth,(la+l)*sqth,-cqth],
], dtype=np.float)
veef = Jeef @ self.dTh
bc = np.array([[275.0, 0],[0, 275.0]], dtype=np.float)
kc = np.array([[30000.0, 0.0],[0.0, 30000.0]],dtype=np.float)
fc = - kc @ (peef - self.contact_point) - bc @ veef
tau_c = Jeef.T @ fc
# fc, kc, bc, mu = np.zeros(2), 10000.0, 125.0, 0.0
# fc[1] = - kc * (peef[1] - self.contact_point[1]) - bc * veef[1]
# fc[0] = - np.sign(veef[0])*mu*fc[1]
# tau_c = Jeef.T @ fc
# invM = np.linalg.inv(M)
# ddTheta = invM @ (tau - h - d @ self.dTh)
# A = Jeef
# P = np.eye(A.shape[1]) - invM @ A.T @ np.linalg.inv(A @ invM @ A.T) @ A
# ddTheta = P @ ddTheta
else:
self.contact_point = None
tau_c = np.zeros(5)
# invM = np.linalg.inv(M)
# ddTheta = invM @ (tau - h - d @ self.dTh + tau_c)
ddTheta = np.linalg.solve(M, tau - h - d @ self.dTh + tau_c)
return np.r_[self.dTh, ddTheta]
def controller_func(self):
tau = np.zeros(5)
MB, MF, IB, IF, L, la, c, ks, kt, x0s = self.params
x, y, th, q, l = self.Th
dx, dy, dth, dq, dl = self.dTh
dxD, thD = self.dxD, 0.0
dx_max = 0.8
x_lengthen = self.x_lengthen
# if self.time_elapsed > 3:
# dxD, thD = 0.5, 0.0
# # dxD = -np.sign(x-0)*min([np.abs(3*(x-0)), 0.5])
# dx_max = dxD - 0.1
# # x_lengthen = 0.05 + 0.05
# if self.time_elapsed > 20:
# dxD, thD = 0.3, 0.0
# dx_max = 0.5
# # x_lengthen = 0.05 + 0.1
if np.abs(dxD) > 0.005:
K1, K2, K3 = 0.3, 0.0, 0.0
else:
K1, K2, K3 = 0.2, 0.3, 0.4
if self.status is None:
self.qTD = self._fp_controller(dxD, thD, dx_max, K1, K2, K3)
self._set_status()
if self.status == rphase.BOTTOM:
# self.la0, self.la_t0 = la, self.time_elapsed
self.la_tar = x_lengthen + self.la_min
if self.status == rphase.LO:
# self.la0, self.la_t0 = la, self.time_elapsed
self.la_tar = self.la_min
# if (self.status == rphase.FAL) or (self.status == rphase.TOP):
if (self.status == rphase.RIS) or (self.status == rphase.FAL) or (self.status == rphase.TOP):
self.qTD = self._fp_controller(dxD, thD, dx_max, K1, K2, K3)
Kp, Kv = 4500.0, 450.0
tau[3] = -(Kp*(q - self.qTD) + Kv*(dq))
if self.status == rphase.TD:
self.thLO = -th
if (self.status == rphase.TD)or(self.status == rphase.SQD) or (self.status == rphase.STU) or (self.status == rphase.BOTTOM):
Kp, Kv = 3000, 325.0
tau[3] = Kp*(th - self.thLO) + Kv*(dth)
lg = la + l
self.params[5] = self._la_controller(k = 200)
l = lg - self.params[5]
self.set_state([x, y, th, q, l, dx, dy, dth, dq, dl ])
return tau
def _set_status(self):
MB, MF, IB, IF, L, la, c, ks, kt, x0s = self.params
x, y, th, q, l = self.Th
dx, dy, dth, dq, dl = self.dTh
cq, sq, cth, sth = np.cos(q), np.sin(q), np.cos(th), np.sin(th)
cqth, sqth = np.cos(q+th), np.sin(q+th)
peef = np.array([
c*sth+(la+l)*sqth+x,
-c*cth-cqth*(+la+l)+y,
], dtype=np.float)
if self.status == rphase.FAL:
if peef[1] < 0:
self.status = rphase.TD
elif self.status == rphase.TD:
self.status = rphase.SQD
elif self.status == rphase.SQD:
if dy > 0:
self.status = rphase.BOTTOM
elif self.status == rphase.BOTTOM:
self.status = rphase.STU
elif self.status == rphase.STU:
if peef[1] > 0:
self.status = rphase.LO
elif self.status == rphase.LO:
self.status = rphase.RIS
elif self.status == rphase.RIS:
if dy < 0:
self.status = rphase.TOP
elif self.status == rphase.TOP:
self.status = rphase.FAL
else:
if peef[1] < 0:
if dy < 0:
self.status = rphase.SQD
else:
self.status = rphase.STU
else:
if dy < 0:
self.status = rphase.FAL
else:
self.status = rphase.RIS
def _la_controller(self, k):
MB, MF, IB, IF, L, la, c, ks, kt, x0s = self.params
if self.la_tar is None:
return la
res_la = la + k*(self.la_tar - la)*self.dt
# res_la = self.la0 + np.sign(self.la_tar - la) * k * (self.time_elapsed - self.la_t0)**2
if res_la >= self.la_tar:
return self.la_tar
if res_la <= self.la_min:
return self.la_min
if res_la >= self.la_max:
return self.la_max
return res_la
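    # Raibert-style foot placement: choose the touchdown leg angle qTD so the foot
    # lands at about half the expected stance travel for the desired speed dxD,
    # corrected by feedback on velocity, pitch and pitch rate (gains K1, K2, K3).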
def _fp_controller(self, dxD, thD, dx_max, K1, K2, K3):
MB, MF, IB, IF, L, la, c, ks, kt, x0s = self.params
x, y, th, q, l = self.Th
dx, dy, dth, dq, dl = self.dTh
cq, sq, cth, sth = np.cos(q), np.sin(q), np.cos(th), np.sin(th)
if np.abs(dxD) > 0.005:
tST = np.pi * np.sqrt(MB/ks)
if dxD < dx - dx_max :
xST = tST * (dx - dx_max)
elif dxD > dx + dx_max :
xST = tST * (dx + dx_max)
else:
xST = tST * dxD
else:
xST = 0.0
xERR = K1*(dx-dxD) - K2*(th-thD) - K3*(dth)
xTD = (la+l)*(-c*sth*MB + (MB+MF)*xERR)/(L/2*MF+(la+l)*MB) + xST/2.0
while np.abs(xTD) > (la+l):
xERR = xERR*0.95
xTD = (la+l)*(-c*sth*MB + (MB+MF)*xERR)/(L/2*MF+(la+l)*MB) + xST/2.0
qTD = np.arcsin(xTD/(la+l)) - th
return qTD
@property
def vis_body(self):
MB, MF, IB, IF, L, la, c, ks, kt, x0s = self.params
x, y, th, q, l = self.Th
cb, cf, cj = self.vis_point
pb = cb[0].reshape(2,1) + sRb(th) @ np.array([
[0, -c/4],
[3*c/8, -c/4],
[3*c/8, c/4],
[-3*c/8, c/4],
[-3*c/8, -c/4],
[0, -c/4],
[0, -c],
]).T
pf = cf[0].reshape(2,1) + sRb(q+th) @ np.array([
[0.0, L/2],
[0.0, -L/2],
]).T
pa = cj[0].reshape(2,1) + sRb(q+th) @ np.array([
[0.0, 0.0],
[0.0, -la],
]).T
return [
(pb, 1.2, 'blue'),
(pf, 1.2, 'blue'),
(pa, 1.8, 'r')
]
@property
def vis_point(self):
MB, MF, IB, IF, L, la, c, ks, kt, x0s = self.params
x, y, th, q, l = self.Th
cq, sq, cth, sth = np.cos(q), np.sin(q), np.cos(th), np.sin(th)
cqth, sqth = np.cos(q+th), np.sin(q+th)
cb = np.array([ x , y ])
cj = cb + np.array([c*sth, -c*cth])
cf = cj + np.array([(l+la-L/2)*sqth, -(l+la-L/2)*cqth])
return [
(cb, 2.5, 'indigo'),
(cf, 2.5, 'indigo'),
(cj, 2.5, 'green')
]
@property
def vis_lim(self):
MB, MF, IB, IF, L, la, c, ks, kt, x0s = self.params
cb, cf, cj = self.vis_point
ymax, ymin = cb[0][1] + L, cb[0][1] - L
xmax, xmin = cb[0][0] + L, cb[0][0] - L
return xmax, xmin, ymax, ymin
@property
def vis_text(self):
x, y, th, q, l = self.Th
dx, dy, dth, dq, dl = self.dTh
if self.status is None:
status = 'None'
else:
status = self.status.name
return [
(self.time_elapsed, (0.02, 0.96), 'time: {0:.2f} sec', 9, 'b'),
(th*180/np.pi, (0.02, 0.93), 'th: {0:.1f} deg', 9, 'dimgray'),
(q*180/np.pi, (0.02, 0.90), 'q: {0:.1f} deg', 9, 'dimgray'),
((self.qTD)*180/np.pi, (0.02, 0.87), 'qTD: {0:.1f} deg', 9, 'dimgray'),
((q-self.qTD)*180/np.pi, (0.02, 0.84), 'ERR(q-qTD): {0:.1f} deg', 9, 'dimgray'),
((self.thLO)*180/np.pi, (0.02, 0.81), 'thLO: {0:.1f} deg', 9, 'dimgray'),
((th-self.thLO)*180/np.pi, (0.02, 0.78), 'ERR(th-thLO): {0:.1f} deg', 9, 'dimgray'),
(status, (0.02, 0.75), 'status: {0}', 9, 'r'),
(dx, (0.02, 0.72), 'dx: {0:.3f} m/s', 9, 'b'),
]
| Jarvis7923/raibert-hopper-sim | src/rd.py | rd.py | py | 12,950 | python | en | code | 1 | github-code | 13 |
42523544246 | """API for Numerai Signals"""
from typing import List, Dict
import os
import codecs
import decimal
from io import BytesIO
import requests
import pandas as pd
from numerapi import base_api
from numerapi import utils
SIGNALS_DOM = "https://numerai-signals-public-data.s3-us-west-2.amazonaws.com"
class SignalsAPI(base_api.Api):
""""API for Numerai Signals"""
TICKER_UNIVERSE_URL = f"{SIGNALS_DOM}/latest_universe.csv"
HISTORICAL_DATA_URL = f"{SIGNALS_DOM}/signals_train_val_bbg.csv"
def __init__(self, *args, **kwargs):
base_api.Api.__init__(self, *args, **kwargs)
self.tournament_id = 11
def get_leaderboard(self, limit: int = 50, offset: int = 0) -> List[Dict]:
"""Get the current Numerai Signals leaderboard
Args:
limit (int): number of items to return (optional, defaults to 50)
offset (int): number of items to skip (optional, defaults to 0)
Returns:
list of dicts: list of leaderboard entries
Each dict contains the following items:
* username (`str`)
* sharpe (`float`)
* rank (`int`)
* prevRank (`int`)
* today (`float`)
* mmc (`float`)
* mmcRank (`int`)
* ic (`float`)
* icRank (`int`)
* nmrStaked (`float`)
Example:
>>> numerapi.SignalsAPI().get_leaderboard(1)
[{'prevRank': 1,
'rank': 1,
'sharpe': 2.3,
'today': 0.01321,
'username': 'floury_kerril_moodle',
'mmc': -0.0101202715,
'mmcRank': 30,
'nmrStaked': 13.0,
'ic': -0.0101202715,
'icRank': 30,
}]
"""
query = '''
query($limit: Int!
$offset: Int!) {
signalsLeaderboard(limit: $limit
offset: $offset) {
prevRank
rank
sharpe
today
username
mmc
mmcRank
nmrStaked
icRank
icRep
}
}
'''
arguments = {'limit': limit, 'offset': offset}
data = self.raw_query(query, arguments)['data']['signalsLeaderboard']
return data
def upload_predictions(self, file_path: str = "predictions.csv",
model_id: str = None,
df: pd.DataFrame = None) -> str:
"""Upload predictions from file.
Will read TRIGGER_ID from the environment if this model is enabled with
a Numerai Compute cluster setup by Numerai CLI.
Args:
file_path (str): CSV file with predictions that will get uploaded
model_id (str): Target model UUID (required for accounts
with multiple models)
df (pandas.DataFrame): Pandas DataFrame to upload, if function is
given df and file_path, df will be uploaded
Returns:
str: submission_id
Example:
>>> api = SignalsAPI(secret_key="..", public_id="..")
>>> model_id = api.get_models()['uuazed']
>>> api.upload_predictions("prediction.cvs", model_id=model_id)
'93c46857-fed9-4594-981e-82db2b358daf'
>>> # upload directly from a pandas DataFrame:
>>> api.upload_predictions(df = predictions_df, model_id=model_id)
"""
self.logger.info("uploading predictions...")
# write the pandas DataFrame as a binary buffer if provided
buffer_csv = None
if df is not None:
buffer_csv = BytesIO(df.to_csv(index=False).encode())
buffer_csv.name = file_path
auth_query = '''
query($filename: String!
$modelId: String) {
submissionUploadSignalsAuth(filename: $filename
modelId: $modelId) {
filename
url
}
}
'''
arguments = {'filename': os.path.basename(file_path),
'modelId': model_id}
submission_resp = self.raw_query(auth_query, arguments,
authorization=True)
auth = submission_resp['data']['submissionUploadSignalsAuth']
# get compute id if available and pass it along
headers = {"x_compute_id": os.getenv("NUMERAI_COMPUTE_ID")}
with open(file_path, 'rb') if df is None else buffer_csv as file:
requests.put(auth['url'], data=file.read(), headers=headers)
create_query = '''
mutation($filename: String!
$modelId: String
$triggerId: String) {
createSignalsSubmission(filename: $filename
modelId: $modelId
triggerId: $triggerId
source: "numerapi") {
id
firstEffectiveDate
}
}
'''
arguments = {'filename': auth['filename'],
'modelId': model_id,
'triggerId': os.getenv('TRIGGER_ID', None)}
create = self.raw_query(create_query, arguments, authorization=True)
return create['data']['createSignalsSubmission']['id']
def submission_status(self, model_id: str = None) -> Dict:
"""submission status of the last submission associated with the account
Args:
model_id (str)
Returns:
dict: submission status with the following content:
* firstEffectiveDate (`datetime.datetime`):
* userId (`string`)
* filename (`string`)
* id (`string`)
* submissionIp (`string`)
* submittedCount (`int`)
* filteredCount (`int`)
* invalidTickers (`string`)
* hasHistoric (`bool`)
* historicMean (`float`)
* historicStd (`float`)
* historicSharpe (`float`)
* historicMaxDrawdown (`float`)
Example:
>>> api = SignalsAPI(secret_key="..", public_id="..")
>>> model_id = api.get_models()['uuazed']
>>> api.submission_status(model_id)
{'firstEffectiveDate': datetime.datetime(2020, 5, 12, 1, 23),
'userId': "slyfox",
'filename': 'model57-HPzOyr56TPaD.csv',
             'id': '1234',
'submissionIp': "102.142.12.12",
'submittedCount': 112,
'filteredCount': 12,
'invalidTickers': 'AAAPL,GOOOG',
'hasHistoric': true,
'historicMean': 1.23,
'historicStd': 2.34,
'historicSharpe': 3.45,
'historicMaxDrawdown': 4.56}
"""
query = '''
query($modelId: String) {
model(modelId: $modelId) {
latestSignalsSubmission {
id
filename
firstEffectiveDate
userId
submissionIp
submittedCount
filteredCount
invalidTickers
hasHistoric
historicMean
historicStd
historicSharpe
historicMaxDrawdown
}
}
}
'''
arguments = {'modelId': model_id}
data = self.raw_query(query, arguments, authorization=True)
status = data['data']['model']['latestSignalsSubmission']
return status
def public_user_profile(self, username: str) -> Dict:
"""Fetch the public Numerai Signals profile of a user.
Args:
username (str)
Returns:
dict: user profile including the following fields:
* username (`str`)
* startDate (`datetime`)
* id (`string`)
* bio (`str`)
* nmrStaked (`decimal.Decimal`)
Example:
>>> api = SignalsAPI()
>>> api.public_user_profile("floury_kerril_moodle")
{'bio': None,
'id': '635db2a4-bdc6-4e5d-b515-f5120392c8c9',
'startDate': datetime.datetime(2019, 3, 26, 0, 43),
'username': 'floury_kerril_moodle',
'nmrStaked': Decimal('14.630994874320760131')}
"""
query = """
query($username: String!) {
v2SignalsProfile(modelName: $username) {
id
startDate
username
bio
nmrStaked
}
}
"""
arguments = {'username': username}
data = self.raw_query(query, arguments)['data']['v2SignalsProfile']
# convert strings to python objects
utils.replace(data, "startDate", utils.parse_datetime_string)
utils.replace(data, "nmrStaked", utils.parse_float_string)
return data
def daily_model_performances(self, username: str) -> List[Dict]:
"""Fetch daily Numerai Signals performance of a model.
Args:
username (str)
Returns:
list of dicts: list of daily user performance entries
For each entry in the list, there is a dict with the following
content:
* date (`datetime`)
* corrRank (`int`)
* corrRep (`float` or None)
* mmcRank (`int`)
* mmcRep (`float` or None)
* icRank (`int`)
* icRep (`float` or None)
* corr20dRank (`int`)
* corr20dRep (`float` or None)
* mmc20dRank (`int`)
* mmc20dRep (`float` or None)
Example:
>>> api = SignalsAPI()
>>> api.daily_model_performances("floury_kerril_moodle")
[{'corrRank': 45,
'corrRep': -0.00010935616731632354,
'corr20dRank': None,
'corr20dRep': None,
'mmc20dRank': None,
'mmc20dRep': None,
'date': datetime.datetime(2020, 9, 18, 0, 0, tzinfo=tzutc()),
'mmcRank': 6,
'mmcRep': 0.0,
'icRank': 6,
'icRep': 0.0},
...
]
"""
query = """
query($username: String!) {
v2SignalsProfile(modelName: $username) {
dailyModelPerformances {
date
corrRank
corrRep
mmcRep
mmcRank
corr20dRep
corr20dRank
icRep
icRank
mmc20dRep
mmc20dRank
}
}
}
"""
arguments = {'username': username}
data = self.raw_query(query, arguments)['data']['v2SignalsProfile']
performances = data['dailyModelPerformances']
# convert strings to python objects
for perf in performances:
utils.replace(perf, "date", utils.parse_datetime_string)
return performances
def daily_user_performances(self, username: str) -> List[Dict]:
"""DEPRECATED Fetch daily Numerai Signals performance of a user.
Args:
username (str)
Returns:
list of dicts: list of daily user performance entries
For each entry in the list, there is a dict with the following
content:
* rank (`int`)
* date (`datetime`)
* sharpe (`float`)
* mmcRep (`float`)
* reputation (`float`)
Example:
>>> api = SignalsAPI()
>>> api.daily_user_performances("floury_kerril_moodle")
[{'date': datetime.datetime(2020, 5, 16, 0, 0,
'rank': 1,
'sharpe': 2.35,
'mmcRep': 0.35,
'reputation': 1.35
},
...]
"""
query = """
query($username: String!) {
signalsUserProfile(username: $username) {
dailyUserPerformances {
rank
date
sharpe
mmcRep
reputation
}
}
}
"""
self.logger.warning("Method daily_user_performances is DEPRECATED, "
"use daily_model_performances")
arguments = {'username': username}
data = self.raw_query(query, arguments)['data']['signalsUserProfile']
performances = data['dailyUserPerformances']
# convert strings to python objects
for perf in performances:
utils.replace(perf, "date", utils.parse_datetime_string)
return performances
def daily_submissions_performances(self, username: str) -> List[Dict]:
"""Fetch daily Numerai Signals performance of a user's submissions.
Args:
username (str)
Returns:
list of dicts: list of daily submission performance entries
For each entry in the list, there is a dict with the following
content:
* date (`datetime`)
* returns (`float`)
* submission_time (`datetime`)
* correlation (`float`)
* mmc (`float`)
* ic (`float`)
* roundNumber (`int`)
* corrRep (`float`)
* mmcRep (`float`)
Example:
>>> api = SignalsAPI()
>>> api.daily_submissions_performances("uuazed")
[{'date': datetime.datetime(2020, 5, 16, 0, 0),
'returns': 1.256,
'submissionTime': datetime.datetime(2020, 5, 12, 1, 23)},
'corrRep': None,
'mmc': None,
'ic': 0.11,
'mmcRep': None,
'roundNumber': 226,
'correlation': 0.03}
...
]
"""
query = """
query($username: String!) {
signalsUserProfile(username: $username) {
dailySubmissionPerformances {
date
returns
submissionTime
correlation
mmc
roundNumber
corrRep
mmcRep
ic
}
}
}
"""
arguments = {'username': username}
data = self.raw_query(query, arguments)['data']['signalsUserProfile']
performances = data['dailySubmissionPerformances']
# convert strings to python objects
for perf in performances:
utils.replace(perf, "date", utils.parse_datetime_string)
utils.replace(perf, "submissionTime", utils.parse_datetime_string)
return performances
def ticker_universe(self) -> List[str]:
"""fetch universe of accepted tickers
Returns:
list of strings: list of currently accepted tickers
Example:
>>> SignalsAPI().ticker_universe()
["MSFT", "AMZN", "APPL", ...]
"""
result = requests.get(self.TICKER_UNIVERSE_URL, stream=True)
iterator = codecs.iterdecode(result.iter_lines(), 'utf-8')
tickers = [t.strip() for t in iterator if t != 'bloomberg_ticker']
return tickers
def download_validation_data(self, dest_path: str = ".",
dest_filename: str = None) -> str:
"""download CSV file with historical targets and ticker universe
Returns:
str: path to csv file
Example:
>>> SignalsAPI().download_validation_data()
            numerai_signals_historical.csv
"""
# set up download path
if dest_filename is None:
dest_filename = "numerai_signals_historical.csv"
path = os.path.join(dest_path, dest_filename)
# create parent folder if necessary
os.makedirs(dest_path, exist_ok=True)
utils.download_file(
self.HISTORICAL_DATA_URL, path, self.show_progress_bars)
return path
def stake_get(self, username) -> decimal.Decimal:
"""get current stake for a given users
Args:
username (str)
Returns:
decimal.Decimal: current stake
Example:
>>> SignalsAPI().stake_get("uuazed")
Decimal('14.63')
"""
data = self.public_user_profile(username)
return data['totalStake']
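# Illustrative usage sketch (not part of the original module). It only
# exercises the credential-free queries documented above and assumes the
# SignalsAPI class defined in this file plus network access.
if __name__ == "__main__":
    api = SignalsAPI()
    print(api.daily_model_performances("floury_kerril_moodle")[:2])
    print(api.ticker_universe()[:5])
    print(api.stake_get("uuazed"))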
| nikampe/Numerai_Models | venv/lib/python3.9/site-packages/numerapi/signalsapi.py | signalsapi.py | py | 17,006 | python | en | code | 0 | github-code | 13 |
11375636224 | from cs50 import get_string
students = []
for i in range(3):
name = get_string("Name: ")
dorm = get_string("Dorm: ")
# key:value
student = {"name": name, "dorm": dorm}
students.append(student)
for student in students:
print(f"{student['name']} is in dorm {student['dorm']}") | lance-lh/learning-cs50 | pset6/struct0.py | struct0.py | py | 303 | python | en | code | 1 | github-code | 13 |
23034384142 | def merge(them):
"""
Given a collection of dictionaries,
recursively merge them and all of their list values
"""
if not isinstance(them, list):
return them
if len(them) == 0:
return {}
if len(them) == 1:
return them[0]
if len(them) > 2:
return merge(
[
*them[:-2],
merge(them[-2:]),
]
)
first, second = them
writable = {**second}
for key, value in first.items():
conflict = writable.get(key)
if isinstance(value, dict) and isinstance(conflict, dict):
writable[key] = merge([value, conflict])
        elif isinstance(value, list) and isinstance(conflict, list):
writable[key] = [*map(merge, value + conflict)]
else:
writable[key] = value
return writable
def flatten(collections):
"""
Given a List[Union[List, Union[..., Any]]], flatten it into a List[Any]
Union[..., Any] in this case refers to a recursively nesting structure
"""
return [
item
for sub in collections
for item in (flatten(sub) if isinstance(sub, list) else [sub])
]
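# Illustrative demo (not part of the original module), showing the intended
# behavior of merge() and flatten() on small literal inputs.
if __name__ == "__main__":
    # nested dicts under the same key are merged; other keys are kept
    print(merge([{"a": {"x": 1}}, {"a": {"y": 2}, "b": 3}]))
    # arbitrarily nested lists collapse into a single flat list
    print(flatten([1, [2, [3, 4]], [[5]]]))  # -> [1, 2, 3, 4, 5]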
| gastrodon/terraform-compose | library/depends/tools.py | tools.py | py | 1,193 | python | en | code | 3 | github-code | 13 |
9921411463 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import datetime
import time
from collections import OrderedDict
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from gi.repository import GObject
from Global import getFechas
# from Tree import Tree
class TreeSemana(Gtk.TreeView):
def __init__(self):
Gtk.TreeView.__init__(self, Gtk.TreeStore(GObject.TYPE_STRING,
GObject.TYPE_STRING, GObject.TYPE_STRING, GObject.TYPE_STRING))
self.set_size_request(100, 150)
self.set_rules_hint(True)
self.set_property("enable-tree-lines", True)
self.set_headers_visible(True)
self.__set_columnas(['Fecha', 'Entrada', 'Salida', 'Saldo'])
self.show_all()
def __set_columnas(self, cols):
for col in cols:
index = cols.index(col)
cellrender = Gtk.CellRendererText()
columna = Gtk.TreeViewColumn(col, cellrender, text=cols.index(col))
columna.set_property('visible', True)
columna.set_property('resizable', False)
self.append_column(columna)
cellrender.set_property("editable", False)
class Info(Gtk.Box):
def __init__(self):
Gtk.Box.__init__(self, orientation=Gtk.Orientation.VERTICAL, homogeneous=False)
self.set_border_width(2)
self.label = Gtk.Label("Selecciona un Funcionario para ver sus datos.")
self.frame = Gtk.Frame()
scroll = Gtk.ScrolledWindow()
scroll.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
scroll.add(self.frame)
self.frame.set_label(" Semanas: ")
self.saldo = Gtk.Label('Saldo Mensual: 00:00')
self.pack_start(self.label, False, False, 0)
self.pack_start(scroll, True, True, 0)
self.pack_start(self.saldo, False, False, 5)
self.vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, homogeneous=False)
self.frame.add(self.vbox)
self.show_all()
        self.fechas = []  # self._dict = OrderedDict()  # week: {date: data}
self._dict = OrderedDict()
self._dict_tree = OrderedDict()
def switch_page_and_set_user(self, user, indice, _dict):
self.label.set_text('Datos de: %s' % user)
self._dict = _dict
        keys = list(self._dict.keys())  # months
        keymes = keys[indice]  # selected month; each date maps to [clock-in, clock-out, balance]
        fechas = list(self._dict[keymes].keys())  # dates of this month
        # FIXME: there is a bug with ultimasemana for the month of December
        primersemana = datetime.datetime.strptime(fechas[0], '%d/%m/%Y').isocalendar()[1]
        ultimasemana = datetime.datetime.strptime(fechas[-1], '%d/%m/%Y').isocalendar()[1]
        # All the dates of the affected weeks
        self.fechas = getFechas(primersemana, ultimasemana)
        self.__repack()
        self.update(self._dict)  # so that the balances are recalculated
def __repack(self):
        # Remove the existing tree stores
children = self.vbox.get_children()
for child in children:
self.vbox.remove(child)
child.destroy()
        # Create a tree store for each week
self._dict_tree = OrderedDict()
for fecha in self.fechas:
semana = fecha.isocalendar()[1]
if not semana in self._dict_tree.keys():
tree = TreeSemana()
label = Gtk.Label('Saldo Semanal: 00:00')
self.vbox.pack_start(tree, True, True, 5)
self.vbox.pack_start(label, False, False, 5)
self._dict_tree[semana] = (tree, label)
            # the dates of this week are appended to this tree store
mes = str(fecha.month)
strfecha = str(datetime.date.strftime(fecha , '%d/%m/%Y'))
data = self._dict[mes][strfecha]
self._dict_tree[semana][0].get_model().append(None, [strfecha, data[0], data[1], data[2]])
def update(self, _dict):
self._dict = _dict
saldomensual = datetime.timedelta(hours=0, minutes=0)
        # iterate over the trees
semanas = self._dict_tree.keys()
for semana in semanas:
tree = self._dict_tree[semana][0]
            # update the data
model = tree.get_model()
item = model.get_iter_first()
_iter = None
suma = datetime.timedelta(hours=0, minutes=0)
while item:
_iter = item
                # get the date from the tree
                f = model.get_value(_iter, 0)
                # convert the date string into a datetime
                fecha = datetime.datetime.strptime(f , '%d/%m/%Y')
                # take the month of that date
                mes = fecha.month
                # the data for this tree row come from _dict[mes][fecha]
data = self._dict[str(mes)][f]
model.set_value(_iter, 1, data[0])
model.set_value(_iter, 2, data[1])
model.set_value(_iter, 3, data[2])
                # Compute the weekly balance
temp = time.strptime(data[2], '%H:%M')
suma += datetime.timedelta(hours=temp.tm_hour, minutes=temp.tm_min)
item = model.iter_next(item)
self._dict_tree[semana][1].set_text('Total Semanal: %s' % suma)
saldomensual += suma
        # Compute the monthly balance
self.saldo.set_text('Saldo Mensual: %s' % saldomensual) | fdanesse/TimeControl | Info.py | Info.py | py | 5,479 | python | es | code | 0 | github-code | 13 |
16130023013 | import structlog
print("\n\t DEFAULT Renderer")
log = structlog.get_logger()
log.msg("first message")
log.msg("second message", whom="world", more_than_a_string=[1, 2, 3])
log.msg("third message", key="value!", more_than_strings=[1, 2, 3])
"""
2023-03-17 12:18:16 [info ] first message
2023-03-17 12:18:16 [info ] second message more_than_a_string=[1, 2, 3] whom=world
2023-03-17 12:18:16 [info ] third message key=value! more_than_strings=[1, 2, 3]
===========================================================================
"""
print("\n\t structlog.processors.KeyValueRenderer()")
structlog.configure(
processors=[
structlog.processors.KeyValueRenderer(),
],
)
log = structlog.get_logger()
log.msg("first message")
log.msg("second message", whom="world", more_than_a_string=[1, 2, 3])
log.msg("third message", key="value!", more_than_strings=[1, 2, 3])
"""
structlog.processors.KeyValueRenderer()
event='first message'
whom='world' more_than_a_string=[1, 2, 3] event='second message'
key='value!' more_than_strings=[1, 2, 3] event='third message'
===========================================================================
"""
print("\n\t structlog.processors.JSONRenderer()")
structlog.configure(
processors=[
structlog.processors.JSONRenderer(),
],
)
log = structlog.get_logger()
log.msg("first message")
log.msg("second message", whom="world", more_than_a_string=[1, 2, 3])
log.msg("third message", key="value!", more_than_strings=[1, 2, 3])
"""
structlog.processors.JSONRenderer()
{"event": "first message"}
{"whom": "world", "more_than_a_string": [1, 2, 3], "event": "second message"}
{"key": "value!", "more_than_strings": [1, 2, 3], "event": "third message"}
===========================================================================
"""
print("\n\t structlog.processors.LogfmtRenderer()")
structlog.configure(
processors=[
structlog.processors.LogfmtRenderer(),
],
)
log = structlog.get_logger()
log.msg("first message")
log.msg("second message", whom="world", more_than_a_string=[1, 2, 3])
log.msg("third message", key="value!", more_than_strings=[1, 2, 3])
"""
structlog.processors.LogfmtRenderer()
event="first message"
whom=world more_than_a_string="[1, 2, 3]" event="second message"
key=value! more_than_strings="[1, 2, 3]" event="third message"
===========================================================================
"""
print("\n\t structlog.processors.ExceptionRenderer()")
def divide(x, y):
try:
return x / y
except Exception as e:
log = structlog.get_logger()
log.error("Error occurred while dividing", x=x, y=y, exc_info=e)
structlog.configure(
processors=[
structlog.processors.TimeStamper(fmt="iso"),
structlog.processors.ExceptionRenderer(),
structlog.processors.JSONRenderer(),
],
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
context_class=dict,
cache_logger_on_first_use=True,
)
divide(1, 0)
"""
structlog.processors.ExceptionRenderer()
{"x": 1, "y": 0, "event": "Error occurred while dividing", "timestamp": "2023-03-17T12:32:01.256253Z", "exception": "Traceback (most recent call last):\n File \"D:\\MEGAsync\\Python-related\\PythonMaterial\\python3\\12_Logging\\b_structlog\\b_log_rendering_formats.py\", line 81, in divide\n return x / y\n ~~^~~\nZeroDivisionError: division by zero"}
===========================================================================
"""
| udhayprakash/PythonMaterial | python3/12_Logging/b_structlog/b_log_rendering_formats.py | b_log_rendering_formats.py | py | 3,588 | python | en | code | 7 | github-code | 13 |
13520502436 | from PySide6.QtCore import QAbstractListModel, QModelIndex, Qt, Slot
from PySide6.QtWidgets import QFileDialog, QWidget
from mozregui.ui.addons_editor import Ui_AddonsEditor
class AddonsModel(QAbstractListModel):
"""
A Qt model that can edit addons path.
"""
def __init__(self, parent=None):
QAbstractListModel.__init__(self, parent)
self.addons = []
def rowCount(self, index=QModelIndex()):
return len(self.addons)
def data(self, index, role=Qt.DisplayRole):
if role == Qt.DisplayRole:
return self.addons[index.row()]
def flags(self, index):
return Qt.ItemIsSelectable | Qt.ItemIsEnabled
def add_addon(self, addon):
if addon:
addons_list_length = len(self.addons)
self.beginInsertRows(QModelIndex(), addons_list_length, addons_list_length)
self.addons.append(addon)
self.endInsertRows()
def remove_pref(self, row):
self.beginRemoveRows(QModelIndex(), row, row)
self.addons.pop(row)
self.endRemoveRows()
class AddonsWidgetEditor(QWidget):
"""
A widget to add or remove addons, and buttons to let the user interact.
"""
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.ui = Ui_AddonsEditor()
self.ui.setupUi(self)
self.list_model = AddonsModel()
self.ui.list_view.setModel(self.list_model)
@Slot()
def add_addon(self):
(fileNames, _) = QFileDialog.getOpenFileNames(
self,
"Choose one or more addon files",
filter="addon file (*.xpi)",
)
for fileName in fileNames:
self.list_model.add_addon(fileName)
@Slot()
def remove_selected_addons(self):
selected_rows = sorted(
set(i.row() for i in self.ui.list_view.selectedIndexes()), reverse=True
)
for row in selected_rows:
self.list_model.remove_pref(row)
def get_addons(self):
return self.list_model.addons
if __name__ == "__main__":
from PySide6.QtWidgets import QApplication
app = QApplication([])
view = AddonsWidgetEditor()
view.show()
app.exec()
| mozilla/mozregression | gui/mozregui/addons_editor.py | addons_editor.py | py | 2,225 | python | en | code | 165 | github-code | 13 |
39032860366 | import pandas as pd
import numpy as np
import os
import sys
import pickle
sys.path.append("./ml_auto/")
from data_utils import CatNumAgg, FreqEnc, gen_cat_cat
from custom_estimator import Estimator
from lightgbm import LGBMRegressor
DATA_DIR = "../data/"
df = pd.read_excel(os.path.join(DATA_DIR, "data.xlsx"), sheet_name=0)
print(df.shape)
df.drop("feebackgiven", axis=1, inplace=True)
df = df.sort_values("created_on").reset_index(drop=True)
df["target"] = np.log1p(df.leads_per_opening*df.num_openings)
folds = [
(
df[(df.created_on <= df.created_on.quantile(0.7))].index.tolist(),
df[
(df.created_on > df.created_on.quantile(0.7))
& (df.created_on <= df.created_on.quantile(0.8))
].index.tolist(),
),
(
df[
(df.created_on >= df.created_on.quantile(0.1))
& (df.created_on <= df.created_on.quantile(0.8))
].index.tolist(),
df[
(df.created_on > df.created_on.quantile(0.8))
& (df.created_on <= df.created_on.quantile(0.9))
].index.tolist(),
),
(
df[
(df.created_on >= df.created_on.quantile(0.2))
& (df.created_on <= df.created_on.quantile(0.9))
].index.tolist(),
df[
(df.created_on > df.created_on.quantile(0.9))
& (df.created_on <= df.created_on.quantile(1))
].index.tolist(),
),
]
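# Note on the folds above: rows are time-ordered by created_on, so each fold
# trains on a ~70%-wide quantile window and validates on the following 10%
# slice; the window slides forward by one decile per fold, which keeps the
# validation data strictly later in time than its training data.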
print([(df.iloc[i].shape, df.iloc[j].shape) for i, j in folds])
target = df.target.values
df = gen_cat_cat(df)
cat_freq_cols = [
"category",
"gender",
"organization",
]
fe = FreqEnc(cat_freq_cols=cat_freq_cols)
df = fe.fit_transform(df)
cat_num_agg_dict = {
"area": {
"applicant_location": ["std"],
"education": ["std"],
"num_openings": ["std"],
"max_salary": ["median"],
},
"category_city": {
"applicant_location": ["mean"],
"max_salary": ["median"],
"education": ["std"],
"num_openings": ["std"],
},
"category": {
"applicant_location": ["std"],
"education": ["mean"],
},
"organization": {
"applicant_location": ["std"],
"min_salary": ["median", "std"],
},
"city": {
"applicant_location": ["std"],
},
"category_dow": {
"max_salary": ["mean"],
},
}
catnumagg = CatNumAgg(cat_num_agg_dict=cat_num_agg_dict)
df = catnumagg.fit_transform(df)
print(df.shape)
model_cols = [
"applicant_location",
"applicant_location_mean_grpby_and_category_city",
"applicant_location_std_grpby_and_area",
"applicant_location_std_grpby_and_category",
"applicant_location_std_grpby_and_city",
"applicant_location_std_grpby_and_organization",
"category_fe",
"education",
"education_mean_grpby_and_category",
"education_std_grpby_and_area",
"education_std_grpby_and_category_city",
"english",
"gender_fe",
"max_salary",
"max_salary_mean_grpby_and_category_dow",
"max_salary_median_grpby_and_area",
"max_salary_median_grpby_and_category_city",
"min_salary_median_grpby_and_organization",
"min_salary_std_grpby_and_organization",
"num_openings",
"num_openings_std_grpby_and_area",
"num_openings_std_grpby_and_category_city",
"organization_fe",
]
params = {
"boosting_type": "gbdt",
"colsample_bytree": 0.8,
"learning_rate": 0.1,
"min_child_samples": 90,
"n_estimators": 10000,
"n_jobs": -1,
"num_leaves": 16,
"objective": "regression",
"subsample": 1.0,
"subsample_freq": 10,
}
est = Estimator(
model=LGBMRegressor(**params),
early_stopping_rounds=100,
validation_scheme=folds,
shuffle=True,
)
print(est.get_repeated_out_of_folds(df[model_cols].values, target))
feat_imps = est.feature_importances(columns=model_cols)
feat_imps["cum_imp"] = feat_imps.feature_importance.cumsum()
print(feat_imps)
est.save_model(file_name="../model/model.pkl")
with open("../feature_transformers/feat_trans.pkl", "wb") as out_file:
pickle.dump({"fe": fe, "catnumagg": catnumagg}, out_file)
| harshsarda/LeadsPredictor | src/train.py | train.py | py | 4,110 | python | en | code | 0 | github-code | 13 |
28609379553 | #Question Link: https://takeuforward.org/data-structure/aggressive-cows-detailed-solution/
#Solution (Python3): Refer the below function
def aggressiveCows(stalls, k):
def ispossible(a, n, cows, minDist):
count = 1
lastPlacedCow = a[0]
for i in range(1,n):
if (a[i] - lastPlacedCow >= minDist):
count += 1
lastPlacedCow = a[i]
if count >= cows:
return True
return False
n = len(stalls)
stalls.sort()
low = 1
high = stalls[n - 1] - stalls[0]
while(low <= high):
mid = (low+high) //2
if(ispossible(stalls, n, k, mid)):
low = mid + 1
else:
high = mid - 1
return high
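# Illustrative check (not part of the original solution), using the classic
# sample input: stalls at positions 1, 2, 4, 8, 9 and 3 cows. The largest
# minimum distance is 3 (cows placed at stalls 1, 4 and 8).
if __name__ == "__main__":
    print(aggressiveCows([1, 2, 4, 8, 9], 3))  # expected output: 3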
| AbhiWorkswithFlutter/StriverSDESheet-Python3-Solutions | Striver SDE Sheet/Day 11/Aggressive Cows.py | Aggressive Cows.py | py | 662 | python | en | code | 3 | github-code | 13 |
36260535792 | def molecule_bonds(molecule, session):
'''
Return bonds derived from residue templates where each bond is a pair of atom numbers.
Returned bonds are an N by 2 numpy array.
'''
bond_templates = session.bond_templates
if bond_templates is None:
session.bond_templates = bond_templates = Bond_Templates()
from time import time
t0 = time()
bonds, missing = bond_templates.molecule_bonds(molecule)
t1 = time()
# print('Computed', len(bonds), 'bonds for', molecule.name, 'in', '%.3f' % (t1-t0), 'seconds', missing, 'missing templates')
missing_temp = bond_templates.missing_templates(molecule) if missing > 0 else []
return bonds, missing_temp
class Bond_Templates:
'''
Use reference data describing standard PDB chemical components
to determine which atoms are bonded in a molecule.
'''
def __init__(self, templates_file = None):
if templates_file is None:
from os.path import join, dirname
templates_file = join(dirname(__file__), 'bond_templates')
self.templates_file = templates_file
self.cc_index = None # Index into all bonds list for each chemical component
self.all_bonds = None # Bonds for all chemical components.
# Array of atom names, a pair for each bond, empty name separates chemical components.
self.cc_bond_table = {} # Bond table for each chemical component
def molecule_bonds(self, molecule):
# return self.molecule_bonds_orig(molecule)
from . import molecule_cpp
if self.cc_index is None:
self.read_templates_file()
molecule_cpp.initialize_bond_templates(self.cc_index, self.all_bonds, cc_chars)
m = molecule
return molecule_cpp.molecule_bonds(m.residue_names, m.residue_nums, m.chain_ids, m.atom_names)
def molecule_bonds_orig(self, molecule):
m = molecule
unique_rnames = set(m.residue_names)
bt = self.chemical_component_bond_tables(unique_rnames)
bonds = []
res = index_pairs = None
atom_num = {}
missing_template = set()
anames, rnames, rnums, cids = m.atom_names, m.residue_names, m.residue_nums, m.chain_ids
for a in range(m.atom_count()):
rname = rnames[a]
rnum = rnums[a]
cid = cids[a]
if (rname,rnum,cid) != res:
if not index_pairs is None:
bonds.extend(self.template_bonds(index_pairs, atom_num))
atom_num.clear()
if rname in bt:
aindex, index_pairs = bt[rname]
else:
aindex = index_pairs = None
missing_template.add(rname)
res = (rname, rnum, cid)
aname = anames[a]
if aindex and aname in aindex:
atom_num[aindex[aname]] = a
# else:
# print('Atom %s from residue %s has no template bonds' % (aname, str(res)))
if not index_pairs is None:
bonds.extend(self.template_bonds(index_pairs, atom_num))
bonds.extend(self.backbone_bonds(m))
from numpy import array, int32, empty
ba = array(bonds, int32) if bonds else empty((0,2), int32)
return ba, missing_template
def template_bonds(self, index_pairs, atom_num):
bonds = []
for i1,i2 in index_pairs:
if i1 in atom_num and i2 in atom_num:
bonds.append((atom_num[i1], atom_num[i2]))
return bonds
def backbone_bonds(self, m):
'''Connect consecutive residues in proteins and nucleic acids.'''
bonds = []
ajoin = ((b'C', b'N'), (b"O3'", b'P'))
anames = sum(ajoin, ())
bbatoms = {}
for a in range(m.atom_count()):
aname = m.atom_names[a]
if aname in anames:
rnum = m.residue_nums[a]
cid = m.chain_ids[a]
bbatoms[(rnum, cid, aname)] = a
for (rnum, cid, aname), a1 in bbatoms.items():
for n1,n2 in ajoin:
if aname == n1:
a2 = bbatoms.get((rnum+1, cid, n2))
if not a2 is None:
bonds.append((a1,a2))
return bonds
def chemical_component_bond_tables(self, rnames):
'''Create template bond tables for specified chemical components'''
ccbt = self.cc_bond_table
new_rnames = set(rname for rname in rnames if not rname in ccbt)
if len(new_rnames) == 0:
return ccbt
# print('Reading bonds from file for %d residue types %s' % (len(new_rnames), str(new_rnames)))
if self.cc_index is None:
self.read_templates_file()
cci,blist = self.cc_index, self.all_bonds
for rname in new_rnames:
i = component_index(rname)
if i is None:
continue
apairs = []
bi = cci[i]
if bi != -1:
while blist[bi]:
apairs.append((blist[bi], blist[bi+1]))
bi += 2
# print('Read %s bonds %s' % (str(rname), str(apairs)))
atoms = set([a1 for a1,a2 in apairs] + [a2 for a1,a2 in apairs])
aindex = dict((a,i) for i,a in enumerate(atoms))
ipairs = tuple((aindex[a1], aindex[a2]) for a1,a2 in apairs)
ccbt[rname] = (aindex, ipairs)
return ccbt
def read_templates_file(self):
bt = open(self.templates_file, 'rb')
from numpy import fromstring, int32
self.cc_index = fromstring(bt.read(4*n_cc_chars**3), int32)
self.all_bonds = fromstring(bt.read(), 'S4')
bt.close()
def missing_templates(self, molecule):
missing = set()
for rname in molecule.rnames:
ci = component_index(rname)
if ci == -1 or self.cc_index[ci] == -1:
missing.add(rname)
return tuple(missing)
def write_template_bonds_file(components_cif_path, template_bonds_path):
'''
For each compound in the PDB chemical components file (components.cif)
record the bonds as pairs of atom names.
'''
f = open(components_cif_path)
from numpy import empty, int32, array
cp = empty((n_cc_chars**3,), int32)
cp[:] = -1
bonds = []
while True:
rname, rbonds = next_mmcif_bonds(f)
if rname is None:
break
i = component_index(rname)
cp[i] = len(bonds)
bonds.extend(sum(rbonds,()) + (b'',))
print('%s %d %d' % (str(rname), i, len(rbonds)))
f.close()
ba = array(bonds, 'S4')
bt = open(template_bonds_path, 'wb')
bt.write(cp.tostring())
bt.write(ba.tostring())
bt.close()
return cp, ba
def next_mmcif_bonds(f):
apairs = []
rname = None
foundb = False
while True:
line = f.readline()
if line.startswith('_chem_comp_bond.'):
foundb = True
elif foundb:
if line.startswith('#'):
if len(apairs) > 0:
break
else:
foundb = False
continue
fields = line.split()
rname = fields[0].encode('utf-8')
a1,a2 = fields[1].strip('"').encode('utf-8'), fields[2].strip('"').encode('utf-8')
apairs.append((a1,a2))
elif line == '':
break
return rname, apairs
# Component id can be only uppercase letters and digits
cc_chars = b'\0ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
n_cc_chars = len(cc_chars)
def component_index(rname):
'''Map 3 character chemical component name to an integer.'''
iA, iZ, i0, i9 = ord('A'), ord('Z'), ord('0'), ord('9')
k = 0
for i in rname:
if i >= iA and i <= iZ:
d = i - iA + 1
elif i >= i0 and i <= i9:
d = i - i0 + 27
else:
print ('out of range', rname, i)
return None # Character not in 0-9, A-Z
k = n_cc_chars*k + d
return k
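# Worked example of the base-37 encoding above (illustrative): for b'ALA',
# 'A' -> 1, 'L' -> 12, 'A' -> 1, so the index is ((1 * 37) + 12) * 37 + 1 = 1814,
# i.e. component_index(b'ALA') == 1814.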
if __name__ == '__main__':
write_template_bonds_file('components.cif', 'bond_templates')
| HamineOliveira/ChimeraX | src/apps/hydra/molecule/connect.py | connect.py | py | 8,229 | python | en | code | null | github-code | 13 |
6368029151 | import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import webbrowser
import random
import os
import wolframalpha
import smtplib
engine=pyttsx3.init('sapi5')
voices=engine.getProperty('voices')
# print(voices[0].id)
engine.setProperty('voice',voices[0].id)
engine.setProperty('rate',168)
chrome_path = "C:\Program Files\Google\Chrome\Application\chrome.exe"
def sendEmail(to,content):
server=smtplib.SMTP('smtp.gmail.com',25)
server.ehlo()
server.starttls()
server.login('UR_ID_@gmail.com','ur_password')
server.sendmail('UR_ID_@gmail.com',to,content)
server.close()
def speak(audio):
engine.say(audio)
engine.runAndWait()
def wishme():
hour=int(datetime.datetime.now().hour)
if hour >=5 and hour<12:
speak("Good Morning!")
elif hour>=12 and hour<18:
speak("Good Aternoon!")
else:
speak("Good Evening!")
speak("this is your.. Assistant,how may i help you?")
def takeCommand():
r=sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold=1
audio=r.listen(source)
try:
print("recognizing....")
query=r.recognize_google(audio,language='en-in')
print(f"user said: {query}\n ")
except Exception as e:
# print(e)
print("say that again")
speak("say that again")
return "None"
return query
if __name__ == '__main__':
wishme()
while(True):
query=takeCommand().lower()
if 'wikipedia' in query:
speak('searching wikipedia...')
query=query.replace('wikipedia',"")
results=wikipedia.summary(query,sentences=2)
speak('according to wikipdia')
print(results)
speak(results)
elif 'youtube' in query:
speak('opening youtube..')
urL='https://www.youtube.com'
chrome_path="C:\Program Files\Google\Chrome\Application\chrome.exe"
webbrowser.register('chrome', None,webbrowser.BackgroundBrowser(chrome_path),1)
webbrowser.get('chrome').open_new_tab(urL)
elif 'google' in query:
speak('opening google..')
urL='https://www.google.com'
chrome_path="C:\Program Files\Google\Chrome\Application\chrome.exe"
webbrowser.register('chrome', None,webbrowser.BackgroundBrowser(chrome_path),1)
webbrowser.get('chrome').open_new_tab(urL)
elif 'microsoft' in query:
speak('opening microsoft..')
urL='https://www.microsoft.com'
chrome_path="C:\Program Files\Google\Chrome\Application\chrome.exe"
webbrowser.register('chrome', None,webbrowser.BackgroundBrowser(chrome_path),1)
webbrowser.get('chrome').open_new_tab(urL)
elif '18 plus' in query:
speak('opening porn..')
url = 'www.tiava.com'
chrome_path = 'C:/Program Files/Google/Chrome/Application/chrome.exe %s --incognito'
webbrowser.get(chrome_path).open_new(url)
elif 'stack overflow' in query:
speak('opening stack overflow..')
urL='https://www.stackoverflow.com'
chrome_path="C:\Program Files\Google\Chrome\Application\chrome.exe"
webbrowser.register('chrome', None,webbrowser.BackgroundBrowser(chrome_path),1)
webbrowser.get('chrome').open_new_tab(urL)
elif 'facebook' in query:
speak('opening facebook..')
urL='https://www.facebook.com'
chrome_path="C:\Program Files\Google\Chrome\Application\chrome.exe"
webbrowser.register('chrome', None,webbrowser.BackgroundBrowser(chrome_path),1)
webbrowser.get('chrome').open_new_tab(urL)
elif 'instagram' in query:
speak('opening instagram..')
urL='https://www.instagram.com'
chrome_path="C:\Program Files\Google\Chrome\Application\chrome.exe"
webbrowser.register('chrome', None,webbrowser.BackgroundBrowser(chrome_path),1)
webbrowser.get('chrome').open_new_tab(urL)
elif 'telegram' in query:
speak('opening telegram..')
urL='https://webk.telegram.org/'
chrome_path="C:\Program Files\Google\Chrome\Application\chrome.exe"
webbrowser.register('chrome', None,webbrowser.BackgroundBrowser(chrome_path),1)
webbrowser.get('chrome').open_new_tab(urL)
elif 'olx' in query:
speak('opening olx..')
urL='https://www.olx.com/'
chrome_path="C:\Program Files\Google\Chrome\Application\chrome.exe"
webbrowser.register('chrome', None,webbrowser.BackgroundBrowser(chrome_path),1)
webbrowser.get('chrome').open_new_tab(urL)
elif 'flipkart' in query:
speak('opening flipkart..')
urL='https://www.flipkart.com/'
chrome_path="C:\Program Files\Google\Chrome\Application\chrome.exe"
webbrowser.register('chrome', None,webbrowser.BackgroundBrowser(chrome_path),1)
webbrowser.get('chrome').open_new_tab(urL)
elif 'amazon' in query:
speak('opening amazon..')
urL='https://www.amazon.com/'
chrome_path="C:\Program Files\Google\Chrome\Application\chrome.exe"
webbrowser.register('chrome', None,webbrowser.BackgroundBrowser(chrome_path),1)
webbrowser.get('chrome').open_new_tab(urL)
elif 'music' in query:
music_dir='C:\\Users\\safora\\Desktop\\html\\music'
songs=os.listdir(music_dir)
print(songs)
randNo=random.randint(0,8)
os.startfile(os.path.join(music_dir,songs[randNo]))
elif 'the time' in query:
strtime=datetime.datetime.now().strftime('%H:%M:%S')
speak(f"sir,the time is {strtime}")
elif 'code' in query:
speak('opening v s code editor..')
codepath='C:\\Users\\safora\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe'
os.startfile(codepath)
elif "what's your name" in query or "What is your name" in query:
speak("My friends call me zeera")
elif "who made you" in query or "who created you" in query:
speak("I have been created by god.")
elif "who am i" in query:
speak("If you talk then definitely your human.")
elif "why you came to world" in query:
speak("Thanks to omer. further It's a secret")
elif "write a note" in query:
speak("What should i write, sir")
note = takeCommand()
file = open('jarvis.txt', 'w')
speak("Sir, Should i include date and time")
snfm = takeCommand()
if 'yes' in snfm or 'sure' in snfm:
                strTime = datetime.datetime.now().strftime("%H:%M:%S")
file.write(strTime)
file.write(" :- ")
file.write(note)
else:
file.write(note)
elif "show note" in query:
speak("Showing Notes")
file = open("jarvis.txt", "r")
print(file.read())
speak(file.read(6))
elif 'exit' in query:
speak('bye,see u later')
exit()
elif 'what is' in query:
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences = 1)
speak("According to Wikipedia")
print(results)
speak(results)
elif 'who is' in query:
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences = 1)
speak("According to Wikipedia")
print(results)
speak(results)
elif'send email' in query:
try:
speak('what should i say?')
content=takeCommand()
to='recv_ID_@gmail.com'
sendEmail(to,content)
speak('email has been sent')
except Exception as e:
print(e)
speak('sorry boss ,i am unable to send this mail')
| crazy-cyber/Python-projects | jarvis/jarvis.py | jarvis.py | py | 8,604 | python | en | code | 0 | github-code | 13 |
26593523057 | import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
plt.rcParams['font.sans-serif'] = ['SimHei']
# legend(loc='upper left')
X_train = [[2015], [2016], [2017], [2018]]
y_train = [[7], [9], [13], [17.5]]
y1 = [[0.2916], [0.38242], [0.44593], [0.55624]]
y2 = [[0.31587], [0.35656], [0.47805], [0.58842]]
y3 = [[0.05058], [0.05771], [0.07337], [0.095]]
y4 = [[0.07292], [0.08705], [0.11383], [0.13621]]
y5 = [[0.02691], [0.03178], [0.04261], [0.05496]]
y6 = [[0.02756], [0.03253], [0.04072], [0.04893]]
y7 = [[0.03801], [0.04595], [0.0593], [0.06731]]
y8 = [[0.06763], [0.07786], [0.10398], [0.13279]]
y9 = [[0.03844], [0.04799], [0.06468], [0.06982]]
y10 = [[0.0058], [0.00645], [0.00807], [0.00921]]
y11 = [[0.06465], [0.06669], [0.08242], [0.10209]]
X_test = [[6], [8], [11], [16]]
y_test = [[8], [12], [15], [18]]
def skl_func(y_train, l):
    # Simple linear regression
# model = LinearRegression()
# model.fit(X_train, y_train)
xx = np.linspace(2015, 2023, 100)
# y = model.predict(xx.reshape(xx.shape[0], 1))
# plt.scatter(x=X_train, y=y_train, color='k')
# plt.plot(xx, y, '-g')
plt.scatter(x=X_train, y=y_train)
    # Polynomial regression
quadratic_featurizer = PolynomialFeatures(degree=8)
X_train_quadratic = quadratic_featurizer.fit_transform(X_train)
X_test_quadratic = quadratic_featurizer.fit_transform(X_test)
model2 = LinearRegression()
model2.fit(X_train_quadratic, y_train)
xx2 = quadratic_featurizer.transform(xx[:, np.newaxis])
yy2 = model2.predict(xx2)
plt.plot(xx, yy2, label=l)
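# Each series y1..y11 above holds four yearly values (2015-2018) for one
# city; each call below fits a polynomial through those points and draws the
# extrapolated curve over 2015-2023 on the shared axes.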
skl_func(y1, "广州")
skl_func(y2, "深圳")
skl_func(y3, "珠海")
skl_func(y4, "佛山")
skl_func(y5, "江门")
skl_func(y6, "肇庆")
skl_func(y7, "惠州")
skl_func(y8, "东莞")
skl_func(y9, "中山")
skl_func(y10, "澳门")
skl_func(y11, "香港")
print('X_train:\n', X_train)
# print('X_train_quadratic:\n', X_train_quadratic)
print('X_test:\n', X_test)
# print('X_test_quadratic:\n', X_test_quadratic)
# print('r2:', model2.score(X_test_quadratic, y_test))
plt.legend()
plt.show()
| zxwtry/OJ | python/proj/chi/nihe_2.py | nihe_2.py | py | 2,179 | python | en | code | 5 | github-code | 13 |
8616331068 | import configparser
import datetime
import os
import subprocess
import sys
import numpy as np
import pandas as pd
class HRR(object):
def __init__(self, config, compile_=False):
""""
THis is a python-wrapper to handle HRR model in conjunction with a data assimilation module.
Small edition of original source code was performed.
v.1.0.0: Daily simulation.
Coding: Yuta Ishitsuka
"""
# read input info. for HRR.
self.config = configparser.ConfigParser()
self.config.read(config)
self.rootDir = self.config.get("model", "rootDir")
# file name configuration
self.srcDir = os.path.join(self.rootDir, "src/")
self.exe = os.path.join(self.srcDir, "run")
# compile or not
if compile_:
self.__compile()
def __compile(self):
print("compilation is activated. Make clean/all.")
os.chdir(self.srcDir)
print("make clean")
subprocess.check_call(["make", "clean"])
print("make all")
subprocess.check_call(["make", "all"])
os.chdir(self.rootDir)
def __createInputFile(self, date, runoffDir, restart, mode, outDir):
assimUpdate = mode
roffData = os.path.join(runoffDir,
"%s.txt" % (date.strftime("%Y%m%d")))
restFile = restart
pfafunits = self.config.get("input", "pfafunits")
ndx = self.config.get("input", "ndx")
ndt = 24 # for future improvement
dtis = 3600 # for future improvement
self.outerDt = dtis * ndt
iyear = date.year
imonth = date.month
iday = date.day
doy = (date - datetime.datetime(date.year, 1, 1, 0)).days + 1
sbRate = self.config.get("input", "sbRate")
n_ch_all = self.config.get("input", "n_ch_all")
VARS = [
"pfafunits", "ndx", "ndt", "dtis", "iyear", "imonth", "iday",
"Julian Day", "setfsub_rate", "n_ch_all"
]
VALS = {
"pfafunits": pfafunits,
"ndx": ndx,
"ndt": ndt,
"dtis": dtis,
"iyear": iyear,
"imonth": imonth,
"iday": iday,
"Julian Day": doy,
"setfsub_rate": sbRate,
"n_ch_all": n_ch_all
}
self.infoPath = os.path.join(outDir, "input.txt")
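        # input.txt layout consumed by the HRR executable: line 1 is the
        # assimilation mode, line 2 the source directory, line 3 the runoff
        # file, line 4 the restart file, line 5 the output directory,
        # followed by one "value name" pair per parameter in VARS.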
with open(self.infoPath, mode="w") as f:
f.write("%s\n" % assimUpdate)
f.write("%s\n" % self.srcDir)
f.write("%s\n" % roffData)
f.write("%s\n" % restart)
f.write("%s\n" % outDir)
[f.write("%s %s\n" % (str(VALS[var]), var)) for var in VARS]
def output(self, df, oName, mode="a"):
if mode == "w":
with open(oName, "w") as f:
df = df.reset_index().rename({"index": "Date"}, axis=1)
df.to_csv(f, index=False)
elif mode == "a":
with open(oName, "a") as f:
df.to_csv(f, header=False)
else:
raise IOError("mode %s is unsupported." % mode)
def main_day(self,
date,
flag="restart",
restart="restart.txt",
runoffDir="../data/case6/",
mode="normal",
outDir="./out"):
"""
main API to handle HRR
"""
# check operation mode
if not flag == "initial" and not flag == "restart":
raise IOError("Undefined flag mode %s" % flag)
# create input information file for HRR
self.__createInputFile(date, runoffDir, restart, mode, outDir)
# activate HRR
subprocess.check_call([self.exe, flag, self.infoPath])
nDate = date + datetime.timedelta(seconds=self.outerDt)
out = pd.read_csv(os.path.join(outDir, "discharge_cms.txt"),
header=0,
sep="\s+").rename(index={0: date})
return out, nDate
if __name__ == "__main__":
config = "./config.ini"
compile_ = True
runoffDir = "/home/yi79a/DA/missouli/pyHRR/data/case3/00/"
outDir = "/home/yi79a/DA/missouli/pyHRR/src/out"
if not os.path.exists(outDir):
os.makedirs(outDir)
model = HRR(config, compile_=compile_)
date = datetime.datetime(1990, 1, 1, 0)
out, nDate = model.main_day(date,
flag="initial",
runoffDir=runoffDir,
outDir=outDir)
model.output(out, "./test.csv", mode="w")
date = nDate
eDate = datetime.datetime(1990, 1, 5, 0)
while date < eDate:
out, nDate = model.main_day(date,
flag="restart",
runoffDir=runoffDir,
outDir=outDir)
date = nDate
model.output(out, "./test.csv", mode="a")
| windsor718/pyHRR | pyHRR.py | pyHRR.py | py | 4,965 | python | en | code | 0 | github-code | 13 |