seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
73243352745 | import streamlit as st
import pytube as pt
import os
import subprocess
import re
from utils import logtime, load_ffmpeg
import whisper
from langchain.document_loaders import YoutubeLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
# Session-state keys used throughout the app
URL = 'URL'
TEXT = 'TEXT'
WHISPER = 'WHISPER'
PROCESSING = 'PROCESSING'
STATES = [URL, TEXT, WHISPER, PROCESSING]
# Working-directory filename that download/transcribe/playback all share
AUDIO_FILE = "audio.mp3"
AUDIO_EXISTS = "AUDIO_EXISTS"
model = ''  # NOTE(review): module-level placeholder; load_whisper() rebinds a local instead of reusing this — confirm intent
st.title('Youtube Audio+Text')
def init_state():
    """Seed Streamlit session state with defaults; purge stale files on a fresh session.

    Idiom fix: the four copy-pasted `if key not in st.session_state` blocks are
    collapsed into a single defaults loop with identical behavior.
    """
    defaults = {URL: '', TEXT: '', WHISPER: '', AUDIO_EXISTS: False}
    for key, default in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = default
    # An empty URL means no video has been processed yet this session,
    # so remove leftovers from any previous run.
    if not st.session_state[URL]:
        clear_old_files()
        print("Clear old files")
def clear_old_files():
    """Delete leftover .mp3 files and the transcript from the working directory."""
    stale = [name for name in os.listdir()
             if name.endswith(".mp3") or name == 'transcript.txt']
    for stale_file in stale:
        os.remove(stale_file)
        print(f"Removed old files::{stale_file}")
def extract_youtube_video_id(url):
    """Return the video id from a YouTube watch URL, or None when no `v=` parameter exists."""
    found = re.search(r"v=([^&]+)", url)
    return found.group(1) if found else None
@logtime
def load_whisper():
    """Transcribe AUDIO_FILE with Whisper and persist the transcript.

    Loads the "small" Whisper model on every call (the module-level `model`
    placeholder is never reused), stores the text in session state under
    WHISPER, and writes it to whisper.txt.
    """
    model = whisper.load_model("small")
    # Message fixed to match the model actually loaded (was "Medium")
    print('Loaded Whisper small model')
    print('Transcribing with Whisper model')
    # Use the shared constant instead of duplicating the "audio.mp3" literal
    result = model.transcribe(AUDIO_FILE)
    st.session_state[WHISPER] = result["text"]
    write_file(result["text"], "whisper.txt")
AUDIO_FILE = "audio.mp3"  # NOTE(review): duplicate of the assignment near the top of the file — harmless but redundant
def load_audio():
    """Render an audio player for AUDIO_FILE if it exists; otherwise clear the flag.

    Fix: the original opened the audio file without closing it (handle leak);
    a `with` block now guarantees the handle is released.
    """
    if os.path.exists(AUDIO_FILE):
        st.session_state[AUDIO_EXISTS] = True
        with open(AUDIO_FILE, 'rb') as audio_file:
            audio_bytes = audio_file.read()
        print(f"Audio file exists...{len(audio_bytes)}")
        st.audio(audio_bytes, format="audio/mp3")
    elif st.session_state[AUDIO_EXISTS]:
        st.session_state[AUDIO_EXISTS] = False
def display():
    """Build the Streamlit page: audio player, download buttons, URL form, transcript area."""
    container = st.container()
    text_container = st.container()
    load_audio()
    # Download Button section
    col1, col2 = st.columns(2)
    with col1:
        if st.session_state[AUDIO_EXISTS]:
            # BUG FIX: the original passed the literal string "file" as the
            # download payload; serve the actual audio bytes instead.
            with open(AUDIO_FILE, 'rb') as audio_file:
                st.download_button("Download Audio", audio_file.read(),
                                   "audio.mp3", "application/octet-stream")
    with col2:
        if os.path.exists("transcript.txt"):
            st.download_button("Download Transcript", st.session_state[TEXT],
                               "transcript.txt", "text/plain")
    with container:
        with st.form(key='input_form'):
            user_input = st.text_input("Youtube URL:", placeholder="http://www.youtube.com", key=URL)
            input_submit_button = st.form_submit_button(label='Send')
            if input_submit_button and user_input:
                st.write("You entered... " + st.session_state[URL])
                load_whisper()
    with text_container:
        st.text_area(label="Youtube Transcript:",
                     height=200,
                     value=st.session_state[TEXT])
@logtime
def download_audio():
    """Download the first audio-only stream of the session URL as audio.mp3 via pytube."""
    url = st.session_state[URL]
    if not url:
        return
    print("Downloading....")
    audio_stream = pt.YouTube(url).streams.filter(only_audio=True)[0]
    audio_stream.download(filename="audio.mp3")
    print("Downloaded Audio file....")
def download():
    """Download the session URL's audio with yt-dlp as audio.mp3.

    SECURITY FIX: the user-supplied URL was interpolated into a shell string
    and run with shell=True, allowing command injection. The command is now
    an argument list executed without a shell. The unused video-id lookup
    was also removed.
    """
    command = ["yt-dlp", "--no-config", "-v", "--extract-audio",
               "--audio-format", "mp3", st.session_state[URL], "-o", "audio.mp3"]
    print(command)
    out = subprocess.run(command)
    print('Download with YT-DLP done!!')
@logtime
def transcribe():
    """Load the YouTube transcript, split it into chunks, and cache the full text."""
    loader = YoutubeLoader.from_youtube_url(st.session_state[URL], add_video_info=True)
    chunker = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=500)
    docs = loader.load_and_split(chunker)
    length = len(docs)
    index = int(length / 3 + 1)
    print(f"Loaded {length} documents, Displaying {index}-th document")
    # Full transcript text is both written to disk and cached in session state
    st.session_state[TEXT] = write_chunks(docs, "transcript.txt")
@logtime
def write_chunks(docs, filename):
    """Concatenate each doc's page content (newline-terminated), save to filename, return the text.

    Idiom fix: builds the string with a single join instead of the original
    quadratic `+=` loop.
    """
    full_doc = "".join(doc.page_content + "\n" for doc in docs)
    with open(filename, "w") as f:
        f.write(full_doc)
    return full_doc
def write_file(text, filename):
    """Persist `text` to `filename`, overwriting any existing file."""
    with open(filename, "w") as out_handle:
        out_handle.write(text)
def main():
    """App entry point: initialize session state, then render the UI."""
    # load_ffmpeg()
    init_state()
    display()
if __name__ == "__main__":
main() | olanigan/Youtube_Assistant | app.py | app.py | py | 4,825 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.title",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "streamlit.session_state",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "streamlit.session_state",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_nam... |
8714183178 | """
meclas.py
Package for running various laser utilities in MEC
Apologies on behalf of: Eric Cunningham (and others)
To load: use import meclas or use IPython's %run magic function
Class list and brief description:
LPL -- routines for LPL pulse shaping (with some aux functions), data acquisition, etc.
efc -- extra function class, holds many useful utilities and shortcuts
ep -- shortcut plotting functions for the lazy
HAWG -- Highland AWG control and readout
LOSC -- LeCroy oscilloscope trace read-out, plotting, saving, etc.
EMeters -- LPL and SPL energy meters
MBC -- LPL bias controller utilities
YFE -- LPL YLF Front End seed laser utilities
PFN -- LPL Pulse Forming Network cap bank charging utilities
HWP -- LPL Half Wave Plate motor utilities
**Stage -- Newport and SmarAct stage utilities
**Timing -- ns and fs timing utilities
CAM -- functions for GigE camera acquisition, configuration, etc.
TTL_shutter -- Beckhoff utilities for controlling/tracking Thorlabs TTL shutters
**DG645 -- functions for DG645 operation, parameter backup and restoration, etc.
**SPL -- routines for SPL alignment, etc.
**UNIBLITZ -- UNIBLITZ shutter utilities for setting SPL trigger modes, etc.
**Spectrometer -- functions for Qmini and Ocean Optics USB4000 spectrometers
**VISAR -- routines for VISAR timing, streak camera configuration, laser control, etc.
CtrlSys -- routines for checking responsivity of PVs, hosts, hutch computers, etc.
**SCALLOPS -- routines for LPL pulse shaping simulations
**LabEnv -- functions for interfacing with lab environment monitors
**RIS -- functions for monitoring RIS-related systems and PVs
**PDU -- functions for operating power distribution units
GLOBAL -- home for global constants, PV definitions, etc.
** = FUTURE DEVELOPMENT
"""
### load packages
import socket
import time
import math
import numpy as np
import struct
#from scipy import signal
#from scipy import stats
import matplotlib.pyplot as plt
import pickle
from datetime import date, datetime
from binascii import hexlify
from binascii import unhexlify
import csv
import os.path
import sys
from ophyd.signal import EpicsSignal
import elog
import pcdsdaq.ext_scripts
import glob
#import pandas as pd
import stat
import getpass
import multiprocessing
#import threading
import termios, tty
import select
import re
#import regex as re
class LPL:
"""
Stores functions related to LPL pulse shaping, data acquisition, etc.
Functions include:
:_LinearWave #function for linear waveforms appropriate for loading to the Highland AWG
:_LinearWave2 #function for linear waveforms of variable length, appropriate for specifying targeted pulse shapes
:_ParabolicWave2 #function for parabolic waveforms of variable length, appropriate for specifying targeted pulse shapes
:_ExponentialWave2 #function for exponential waveforms of variable length, appropriate for specifying targeted pulse shapes
:_EW2 #shorthand version of _ExponentialWave2 tailored for facilitating LPL shaping at 10Hz
:_EW2stringhint #same as above but produces strings for the benefit of saving wave hints in recipes
:_ComboWave #combines waveforms for pulse shaping goals
:_TraceFormatting #projects/downsamples waveforms from one horizontal base to another
:_UpdatingShapingAlgorithm #calculates new Highland input based on old input, its corresponding output, and a target
:_FixEdges #allows tweaking of behavior of points near waveform discontinuities (edges and steps)
:_SmoothWvfm #locally smooths an input waveform as an option as part of pulse shaping
:_PulseGoal #helps define a targeted waveform for the full energy output
:_PulseMax #helps set the desired amplitude of _PulseGoal based on shape and desired output energy
:_Psns_get #retrieves the list of pulse segment durations of the current targeted output pulse
:_Psns_set #sets the list of pulse segment durations of a new targeted output pulse
:_SSs_get #retrieves the list of pulse segment start/stop heights of the current targeted output pulse
:_SSs_set #sets the list of pulse segment start/stop heights of a new targeted output pulse
:_YSSs_get #retrieves the list of YFE exponential pulse segment start/stop heights of the current targeted 10Hz output pulse
:_YSSs_set #sets the list of YFE expnential pulse segment start/stop heights of a new targeted 10Hz output pulse
:_wIter2 #wrapper function for using _UpdatingShapingAlgorithm in context
:_weichall #generates weighted waveforms for YFE, 1in1w, 4x2in1w, and 4x2in2w outputs using scopes and energy meters
:_weichToPowerVsTime #converts energy-weighted waveforms into a tuple of instantaneous power vs time
:_PGToPowerVsTime #converts scaled _PulseGoal waveforms into a tuple of instantaneous power vs time
:_pshostcheck #checks the host of the current computer to verify it is a machine with all the proper network access for pulse shaping
:_DateString #shortcut for generating a string of today's date
:get_curr_exp #retrieves the current experiment name in MEC
:get_curr_run #retrieves the current run number in MEC
:get_curr_shape #retrieves the last loaded pulse shape in MEC
:_psheaders #prepares the groundwork for pulse shaping exercises
:_psacqx #acquires data after taking a shot
:_psefc #plots most-recent shot compared to its goal and returns a suggestion for a new input waveform
:psefc10Hz #performs pulse shaping at 10Hz to converge towards an input goal for the YFE output
:_psupd #shortcut for updating the Highland waveform
:psloadwvfm #loads a previously-saved pulse shaping recipe
:pssavewvfm #saves a new pulse shaping recipe
:psviewwvfm #displays a previously-saved pulse shaping recipe
:psrefrwvfm #refreshes a previously-saved pulse shaping recipe to account for system drift
:psrecipes #lists all previously-saved pulse shaping recipes
:psmenu #allows loading or viewing of previously-saved pulse shaping recipes from a list
:pspreshot #executes routine that prepares the state of the laser and its diagnostics for a full-energy shot
:pspostshot #executes routine that records all data after a full-energy shot and returns the laser to a stand-by state
:On #turns the long-pulse laser system on
:SHG_opt #executes the optimization routine for the SHG crystal angles of all four arms of the LPL
Potential future work includes:
- put all the stuff Galtier wants to do inside lpl
- LPL.On() instead of YFE.On(), e.g.
- same for seeing shapes, changing energy, etc.
- help avoid TAB-complete problems because of object instantiation
- like LOSC('a'), for example
- change names to be lowercases so Galtier can TAB-complete easier
- add underscores to methods not generally intended for non-expert usage
- consider consolidating some functions into one
- e.g. _LinearWave, _ExponentialWave, etc.
- add Deconvolution function -- help shape, seems that 100ps length also affected
- account for the instrument response of the PD, amp, scope, etc. in determining a detected waveform
- save PFN voltages on-shot too?
- Scope vertical resolution problem -- casting somewhere? better to motorize characterized ND filters...
- Slow feedback to 10Hz pulse goal based on full shot rbv? Esp to help front edge problems?
- Need this combined with SCALLOPS? Need a fit function to data for the full rbv?
- YSSs: YFE equivalent of SSs
- Steal a notepad pv array
- _EW2(Psns, YSSs)
- Have a version of pulsegoal but 10hz using _ew2 to interpolate instead of lw, still have dellist
- SmartPulseGoal10Hz using output of SCALLOPS
"""
def _LinearWave(Edge1PixNo,Edge1Height,Edge2PixNo,Edge2Height):
    """
    Generates hex string-formatted linearly-interpolated waveform of
    length 140 between two input points
    Primarily intended for use with the Highland AWG, so max value capped at 65535
    _LinearWave(1,10000,120,28000) returns a string of length 140 (560 chars)
    with linear ramp from (pixel 1, height 10000) to (pixel 120, height 28000)
    NOTE(review): when both edges land on the same pixel this returns a
    140-element list of zeros, not a hex string -- callers must tolerate both
    """
    itt=0
    NewString=''
    # Clamp both edge heights into the AWG's 16-bit range [0, 65535]
    if Edge1Height>65535:
        print('Edge1 height exceeds max value of 65535')
        h1=65535
    elif Edge1Height<0:
        print('Edge1 height must be positive')
        h1=0
    else:
        h1=int(Edge1Height)
    if Edge2Height>65535:
        print('Edge2 height exceeds max value of 65535')
        h2=65535
    elif Edge2Height<0:
        print('Edge2 height must be positive')
        h2=0
    else:
        h2=int(Edge2Height)
    #
    # Swap pixel positions if given in reverse order (heights are NOT swapped,
    # so the ramp direction follows the original height assignment)
    if Edge1PixNo>Edge2PixNo:
        print('Edge1 must come before Edge2')
        Dummy=int(Edge1PixNo)
        Edge1PixNo=int(Edge2PixNo)
        Edge2PixNo=Dummy
    # Clamp pixel numbers into [1, 140] and convert to 0-based indices
    if Edge1PixNo<1:
        print('Edge1 pixel number must be >=1')
        p1=0
    elif Edge1PixNo>140:
        print('Edge1 pixel number must be <=140')
        p1=139
    else:
        p1=int(Edge1PixNo)-1
    if Edge2PixNo<1:
        print('Edge2 pixel number must be >=1')
        p2=0
    elif Edge2PixNo>140:
        print('Edge2 pixel number must be <=140')
        p2=139
    else:
        p2=int(Edge2PixNo)-1
    #
    if p1==p2:
        print('Warning: pulse width specified as single pixel.')
        return 140*[0]
    #
    # Emit four hex chars per pixel: zeros outside [p1, p2], linear ramp inside
    while itt<140:
        if itt<p1:
            NewString+='0000'
        elif p1<=itt<=p2:
            NewString+=HAWG._Hex2Byte(int(h2+((itt-p2)*(h2-h1)/float(p2-p1))))
        else:
            NewString+='0000'
        itt+=1
    return NewString
def _LinearWave2(Edge1PixNo,Edge1Height,Edge2PixNo,Edge2Height,offsetQ=0,arraylenQ=5002):
"""
Generates linearly-interpolated waveform between two points (and 0 outside those points)
with requested linear offset and array length
Useful in part for specifying YFE waveforms at 10Hz, generating "goal" waveforms, etc.
_LinearWave2(500,.1,1025,.8,0,5002) returns an array of length 5002 with a linear ramp
from (pixel 500, height 0.1) to (pixel 1025, height 0.8) with no vertical offset
"""
itt=0
h1=Edge1Height-offsetQ
h2=Edge2Height-offsetQ
p1=int(Edge1PixNo)-1
p2=int(Edge2PixNo)-1
NewList=[]
#
while itt<arraylenQ:
if itt<p1:
NewList.append(offsetQ-offsetQ)
elif p1<=itt<=p2:
nextval=h2+((itt-p2)*(h2-h1)/float(p2-p1))#h1*((h2/float(h1))**((itt-p1)/float(p2-p1)))
NewList.append(nextval)
else:# itt>p2:
NewList.append(offsetQ-offsetQ)
itt+=1
return np.array(NewList)+offsetQ
def _ParabolicWave2(Edge1PixNo,Edge1Height,MidPixNo,MidHeight,Edge2PixNo,Edge2Height,offsetQ=0,arraylenQ=5002):
"""
Generates parabolically-interpolated waveform using three points (and 0 outside those points)
with requested linear offset and array length
Only rarely used for specifying YFE waveforms at 10Hz, generating "goal" waveforms, etc.
_ParabolicWave2(500,.1,800,.15,1025,.8,0,5002) returns an array of length 5002 with a
parabola fit to (pixel 500, height 0.1), (pixel 800, height 0.15), and
(pixel 1025, height 0.8) with no vertical offset
"""
itt=0
h1=Edge1Height-offsetQ
h2=Edge2Height-offsetQ
h3=MidHeight-offsetQ
p1=int(Edge1PixNo)-1
p2=int(Edge2PixNo)-1
p3=int(MidPixNo)-1
NewList=[]
while itt<arraylenQ:
if itt<p1:
NewList.append(offsetQ-offsetQ)
elif p1<=itt<=p2:
nextval=(h1*(itt-p2)*(itt-p3)/float((p2-p1)*(p3-p1)))+(h2*(itt-p1)*(itt-p3)/float((p2-p1)*(p2-p3)))+(h3*(itt-p1)*(itt-p2)/float((p3-p1)*(p3-p2)))
NewList.append(nextval)
else:# itt>p2:
NewList.append(offsetQ-offsetQ)
itt+=1
return np.array(NewList)+offsetQ
def _ExponentialWave2(Edge1PixNo,Edge1Height,Edge2PixNo,Edge2Height,offsetQ=0,arraylenQ=5002):
"""
Generates exponentially-interpolated waveform using two points (and 0 outside those points)
with requested linear offset and array length
Most-used function for specifying YFE waveforms at 10Hz (exponential seed waveforms
become ~linear after laser amplification)
_ExponentialWave2(500,.1,1025,.8,0,5002) returns an array of length 5002 with an exponential ramp
from (pixel 500, height 0.1) to (pixel 1025, height 0.8) with no vertical offset
"""
itt=0
h1=Edge1Height-offsetQ
h2=Edge2Height-offsetQ
p1=int(Edge1PixNo)-1
p2=int(Edge2PixNo)-1
NewList=[]
#
while itt<arraylenQ:
if itt<Edge1PixNo:
NewList.append(offsetQ-offsetQ)
elif p1<=itt<=p2:
nextval=h1*((h2/float(h1))**((itt-p1)/float(p2-p1)))
NewList.append(nextval)
else:# itt>p2:
NewList.append(offsetQ-offsetQ)
itt+=1
return np.array(NewList)+offsetQ
@classmethod
def _EW2(cls,Psns='curr',SSs='curr',YSSs='curr',offsetQ=0,YFEbkgrdY=-.004):
    """
    Shorthand for generating the most common _ExponentialWave2 output (or a sum of several)
    Preferred use is LPL._Psns_set(), LPL._SSs_set(), and LPL._YSSs_set() for _EW2 to find and use
    When Psns='curr' | SSs='curr' | YSSs='curr', values are loaded via the corresponding ***_get() commands
    Alternatively they can be specified explicitly, i.e. Psns=[10.25], SSs=[[98,100]], YSSs=[[.02,.114]]
    Compared to _ExponentialWave2, _EW2:
        :can combine several summed _ExponentialWave2 outputs into one convenient function
        :Example: _EW2(Psns=[5,5.25],SSs=[[49,50],[98,100]],YSSs=[[.01,.03],[.055,.16]]) is equivalent to
            _ExponentialWave2(500,.01,1000,.03,0,5002)+_ExponentialWave2(1000,.055,1525,.16,0,5002)
        :infers Edge1PixNo and Edge2PixNo of each _ExponentialWave2 segment from Psns and SSs
        :specifies Edge1Height and Edge2Height of each _ExponentialWave2 segment using YSSs
        :still permits offset specification using offsetQ, if desired
        :automatically uses the standard arraylenQ of 5002
    NOTE(review): YFEbkgrdY is accepted but not used in this routine -- confirm intent
    """
    if Psns=='curr':
        Psns=cls._Psns_get()
    if SSs=='curr':
        SSs=cls._SSs_get()
    if YSSs=='curr':
        YSSs=cls._YSSs_get()
    if len(Psns) != len(YSSs):
        print('Lengths of Psns ({}) and YSSs ({}) do not match! Exiting...'.format(len(Psns),len(YSSs)))
        return False
    # Zero baseline of the standard 5002-sample length (500 px = 0 ns, 100 px/ns)
    outwvfm=cls._LinearWave2(500,0,1025,0,0,5002);#500, 1000 1000, 1525
    # Cumulative segment boundaries in ns; continuous joints share a pixel, so trim 0.25 ns
    YPsns=np.cumsum([0]+[Psns[ii]-0.25*(1 if SSs[ii][1] == SSs[ii+1][0] else 0) for ii in range(len(Psns)-1)] + [Psns[-1]])
    YPsnsPairs=[[int(500+YPsns[ii]*100), int(500+YPsns[ii+1]*100)] for ii in range(len(YPsns)-1)]
    try:
        for ii in range(len(YSSs)):
            outwvfm+=cls._ExponentialWave2(Edge1PixNo=YPsnsPairs[ii][0],Edge1Height=YSSs[ii][0],
                                           Edge2PixNo=YPsnsPairs[ii][1],Edge2Height=YSSs[ii][1],offsetQ=offsetQ,arraylenQ=5002)
        return outwvfm
    except Exception:  # narrowed from bare except so KeyboardInterrupt etc. propagate
        print('Failed to generate waveform!')
        return False
@classmethod
def _EW2stringhint(cls,Psns='curr',SSs='curr',YSSs='curr',YFEbkgrdY=-.004):
    """
    Shorthand for generating a string hint describing the most common _ExponentialWave2 output
    (or a sum of several), e.g. for saving wave hints in recipes
    Preferred use is LPL._Psns_set(), LPL._SSs_set(), and LPL._YSSs_set() for _EW2 to find and use
    When Psns='curr' | SSs='curr' | YSSs='curr', values are loaded via the corresponding ***_get() commands
    Alternatively they can be specified explicitly, i.e. Psns=[10.25], SSs=[[98,100]], YSSs=[[.02,.114]]
    Compared to _ExponentialWave2, _EW2stringhint:
        :can describe several summed _ExponentialWave2 outputs in one string
        :infers Edge1PixNo and Edge2PixNo of each _ExponentialWave2 segment from Psns and SSs
        :specifies Edge1Height and Edge2Height of each _ExponentialWave2 segment using YSSs
        :still permits offset specification using YFEbkgrdY, if desired
        :automatically uses the standard arraylenQ of 5002 and EW offset of 0
    """
    if Psns=='curr':
        Psns=cls._Psns_get()
    if SSs=='curr':
        SSs=cls._SSs_get()
    if YSSs=='curr':
        YSSs=cls._YSSs_get()
    if len(Psns) != len(YSSs):
        print('Lengths of Psns ({}) and YSSs ({}) do not match! Exiting...'.format(len(Psns),len(YSSs)))
        return False
    outwvfm=''
    # Same segment-boundary arithmetic as _EW2: cumulative ns, trimmed at continuous joints
    YPsns=np.cumsum([0]+[Psns[ii]-0.25*(1 if SSs[ii][1] == SSs[ii+1][0] else 0) for ii in range(len(Psns)-1)] + [Psns[-1]])
    YPsnsPairs=[[int(500+YPsns[ii]*100), int(500+YPsns[ii+1]*100)] for ii in range(len(YPsns)-1)]
    try:
        for ii in range(len(YSSs)):
            outwvfm+='_ExponentialWave2({},{},{},{},0,5002)+'.format(YPsnsPairs[ii][0],YSSs[ii][0],YPsnsPairs[ii][1],YSSs[ii][1])
        outwvfm=outwvfm[:-1]  # drop the trailing '+'
        outwvfm+=';; YFEbkgrdY={}'.format(YFEbkgrdY)
        return outwvfm
    except Exception:  # narrowed from bare except so KeyboardInterrupt etc. propagate
        print('Failed to generate string!')
        return False
def _ComboWave(WList): #accept list or csv of 140 pts
"""
Combines list of 140-pt arrays into single waveform scaled to have a maximum value of 1
"""
PreNormL=[]
for DesiredOutputPulseShapeQ in WList:
if len(DesiredOutputPulseShapeQ) == 140*4:#will accept pre-formatted Hex2Byte text
PreNormL.append(np.array([int(DesiredOutputPulseShapeQ[4*ii:4*ii+4],16) for ii in range(len(DesiredOutputPulseShapeQ)//4)]))
elif len(DesiredOutputPulseShapeQ)==140:#will accept a straight list
PreNormL.append(np.array(DesiredOutputPulseShapeQ))
elif DesiredOutputPulseShapeQ.endswith(('.txt','.csv','.dat')):#will accept file
with open(DesiredOutputPulseShapeQ,'r') as filehead:
RawListQ=filehead.read()
if '\r\n' in RawListQ:
ListedValues=RawListQ.split('\r\n')
elif '\n' in RawListQ:
ListedValues=RawListQ.split('\n')
elif ',' in RawListQ:
ListedValues=RawListQ.split(',')
else:
print('Unrecognized format on input file.')
return
if len(ListedValues) != 140:
print('File must have 140 entries; entry count: '+str(len(ListedValues)))
return
PreNormL.append(np.array(ListedValues))
CPreNormL=np.sum(PreNormL,0)
return [1.*entry/float(max(CPreNormL)) for entry in CPreNormL]
def _TraceFormatting(PDTrace, PDFETMap, MaxPDValue, AvgRange=25, FWHM=4):
"""
Takes PD trace and mapping function to generate windowed, averaged, scaled list of 140 pts
"""
MeasuredOutputPulseShape=[]
WeightList=[math.exp(-4*math.log(2)*((ii+1-round(AvgRange/2.))/FWHM)**2) for ii in range(AvgRange)]
WSum = sum(WeightList)
MX , B = PDFETMap
for FETNo in range(140):
Loc = round(MX*FETNo + B)
WSample=sum([PDTrace[int(Loc+(ii+1-round(AvgRange/2.)))]*WeightList[ii] for ii in range(AvgRange)])/WSum
MeasuredOutputPulseShape+=[WSample/MaxPDValue]
return MeasuredOutputPulseShape
def _UpdatingShapingAlgorithm(DesiredOutputPulseShape, MeasuredOutputPulseShape, InputPulseShape, StepQ):
"""
Accepts pre-formatted input, measurement, and goal waveforms to calculate next-iteration input waveform using specified step size
"""
G, M, I = DesiredOutputPulseShape, MeasuredOutputPulseShape, InputPulseShape
NewInputPulseShape=np.clip([abs((StepQ*(G[ii]-M[ii]))+I[ii])*math.ceil(G[ii]) for ii in range(len(G))],0,1)#math.ceil(G) is a mask that disallows values outside the goal
return NewInputPulseShape
def _FixEdges(WavF,DurationListQ,StartStopListQ,PtNumFront=3,PtNumBack=2,CorrFactorFront=.97,CorrFactorBack=1.):
    """
    Applies fixed relationship between points close to waveform discontinuities (e.g. at beginning, end, step, etc.)
    specified by Duration and Start/Stop lists
    WavF is the input waveform
    DurationListQ is the pulse segment durations (e.g. Psns, like [10.25] or [5,5.25])
    StartStopListQ is the pulse segment start/stop list (e.g. SSs, like [[98,100]] or [[24,25],[98,100]])
    PtNumFront is the number of points at the front edge of the pulse to be fixed
    PtNumBack is the number of points at the back edge of the pulse to be fixed
    CorrFactorFront is the fixed multiplicative factor applied from point to point moving towards the front edge of the pulse
        :for the example of PtNumFront=2, CorrFactorFront=0.97 sets P_2=0.97*P_3 then P1=0.97*P_2
    CorrFactorBack is the fixed multiplicative factor applied from point to point moving towards the back edge of the pulse
        :for the example of PtNumBack=3, CorrFactorBack=1.01 sets P_{K-2}=1.01*P_{K-3} then P_{K-1}=1.01*P_{K-2}
        then P_{K}=1.01*P_{K-1}, where P_{K} is the pulse's last point, P_{K-1} is the pulse's second-to-last point, etc.
    New waveform with fixed edges is returned
    """
    FirstPix=0#was 50
    DurListQ=np.cumsum([0]+DurationListQ)
    fWavF=WavF[:]
    # Front edge: cascade CorrFactorFront backwards from pixel PtNumFront toward pixel 0
    for ii in range(PtNumFront):
        fWavF[FirstPix+(PtNumFront-ii-1)]=fWavF[FirstPix+(PtNumFront-ii)]*CorrFactorFront
    DisconCount=0
    ContCount=0
    if len(StartStopListQ)>1:
        for ii in range(len(StartStopListQ)-1):
            if StartStopListQ[ii][-1] != StartStopListQ[ii+1][0]:
                # Discontinuous joint between segments: pin the two pixels
                # flanking the step to their neighbors (4 pixels per ns)
                DisconCount+=1
                fWavF[FirstPix+int(4*DurListQ[ii+1])-1-ContCount]=fWavF[FirstPix+int(4*DurListQ[ii+1])-2-ContCount]
                fWavF[FirstPix+int(4*DurListQ[ii+1])-ContCount]=fWavF[FirstPix+int(4*DurListQ[ii+1])+1-ContCount]
            else:
                # Continuous joint: segments share a pixel, so later indices shift left by one
                ContCount+=1
    #fWavF[FirstPix]=fWavF[FirstPix+1]*1.1
    #try fixing last THREE pixels to help back edge
    # Back edge: cascade CorrFactorBack forward onto the last PtNumBack pixels
    for ii in range(PtNumBack):
        fWavF[FirstPix+int(4*DurListQ[-1])-(PtNumBack-1-ii)-ContCount]=fWavF[FirstPix+int(4*DurListQ[-1])-(PtNumBack-ii)-ContCount]*CorrFactorBack
    # Clamp: normalized waveforms (mean < 1) cap at 1.0; Highland-count waveforms cap at 28000
    for ii in range(len(fWavF)):
        if np.mean(fWavF) < 1:
            if fWavF[ii] > 1:
                fWavF[ii]=1.0
        else:
            if fWavF[ii]>28000:
                fWavF[ii]=28000
    return fWavF
def _SmoothWvfm(wvfm_in):
"""
Performs rudimentary smoothing of waveform by looking at neighboring pixels
Accepts single waveform as input and return smoothed output waveform
"""
wvfm_out=wvfm_in[:]
for ii in range(len(wvfm_in)-2):
wvfm_out[ii+1]=.25*wvfm_in[ii]+.5*wvfm_in[ii+1]+.25*wvfm_in[ii+2]
return wvfm_out
@classmethod
def _PulseGoal(cls,DurationListQ,StartStopListQ):#140 pt list with max at 1
    """
    Generates 140-pt goal waveform according to provided Duration and Start/Stop lists
    Returns None (after printing 'Error') if the two lists are inconsistent
    """
    BeginPix=1
    DurListQ=np.cumsum([0]+list(DurationListQ))
    SSListQ=StartStopListQ[:]
    SegTotQ=len(DurListQ)-1
    # Each duration segment needs exactly one [start, stop] height pair
    if SegTotQ!=len(SSListQ):
        print('Error')
        return
    DelListQ=[]
    for ii in range(-1+len(SSListQ)):
        if len(SSListQ[ii])!=2:
            print('Error')
            return
        # Continuous joints duplicate a pixel; remember it for deletion below
        if SSListQ[ii][1]==SSListQ[ii+1][0]:
            DelListQ.append(BeginPix+(DurListQ[ii+1]*4)-1)
    SegmentsQ=[]
    # Build each segment as a linear ramp (heights scaled from % to Highland counts, 4 px/ns)
    for ii in range(SegTotQ):
        SegmentsQ.append(cls._LinearWave(int(BeginPix+(DurListQ[ii]*4)),int(20000.*SSListQ[ii][0]/100.),int(BeginPix+(DurListQ[ii+1]*4)-1),int(20000.*SSListQ[ii][1]/100.)))
    # Combine segments, drop duplicated joint pixels, and pad back to 140 points
    return np.append(np.delete(np.array(cls._ComboWave(SegmentsQ)),np.array(DelListQ).astype(int)),[0]*len(DelListQ))
@classmethod
def _PulseMax(cls,DurationListQ,StartStopListQ,zzJQ):
    """Amplitude setting for segmented, arbitrary _PulseGoal given Duration/StartStop lists and targeted energy."""
    final_height_frac = 1.0 * StartStopListQ[-1][-1] / 100.
    goal_area = np.sum(cls._PulseGoal(DurationListQ, StartStopListQ))
    return final_height_frac * (50.0 * zzJQ / (5.0 * 500.0 * goal_area))
def _Psns_get():
    """
    Return the current value of Psns, which is an array of pulse duration segments in nanoseconds
    Example: after loading a 15ns flat-top pulse, LPL._Psns_get() will return [15.25]
    Example: after loading a 5ns-5ns step pulse, LPL._Psns_get() will return [5,5.25]
    """
    # Psns is stored in a notepad PV (GLOBAL.PSNS); convert to a plain list
    return list(GLOBAL.PSNS.get())
def _Psns_set(NewPsnsArr):
    """
    Sets the current value of Psns, which is an array of pulse duration segments in nanoseconds
    Each individual value in the array must be a multiple of 0.25 (spacing of Highland AWG pixels is 250ps or 0.25ns)
    Psns must have one segment duration for every pair of segment heights in the segment heights parameter SSs
    Total summed length of array values should be less than 35ns (total shaping window of Highland AWG)
    If the back end of a particular segment is continuous in height with the front end of the following segment
    (e.g. for smooth ramp pulses), you must add 0.25 to the duration of the first segment
    This function can be used in preparation for making a new pulse recipe
    Example: use LPL._Psns_set([7.25]) to prepare to make a new 7ns pulse
    Example: use LPL._Psns_set([5,8.25]) to prepare to make a new 5ns-8ns step pulse
    Returns False on validation or write failure; returns None on success
    """
    if not isinstance(NewPsnsArr, list):  # allow a bare scalar for convenience
        NewPsnsArr=[NewPsnsArr]
    for val in NewPsnsArr:
        # Highland AWG pixels are 250 ps wide, so durations must be 0.25 ns multiples
        if val % .25 != 0:
            print('Psns values need to be multiples of 0.25ns! Update failed!')
            return False
    try:
        GLOBAL.PSNS.set(NewPsnsArr)
        return
    except Exception:  # narrowed from bare except so KeyboardInterrupt propagates
        print('Psns update failed!')
        return False
@classmethod
def _SSs_get(cls):
    """
    Return the current value of SSs, which is an array of pairs of pulse segments heights as percentages of maximum (100%)
    Example: after loading a 15ns flat-top pulse (Psns=[15.25]), LPL._SSs_get() may return something like [[98,100]]
    Example: after loading a 3ns-7ns pulse with a 4x ratio (Psns=[3,7.25]), LPL._SSs_get() may return something like [[24,25],[98,100]]
    """
    # SSs is stored flat in GLOBAL.SSS; regroup into [start, stop] pairs, one per Psns segment
    return [list(GLOBAL.SSS.get()[2*ii:2*ii+2]) for ii in range(len(cls._Psns_get()))]
def _SSs_set(NewSSsArr):
    """
    Sets the current value of SSs, which is an array of pairs of pulse segments heights as percentages of maximum (100%)
    SSs must have one pair of heights for every duration segment in the segment durations parameter Psns
    This function can be used in preparation for making a new pulse recipe
    Example: use LPL._SSs_set([[98,100]]) to prepare to make a new flat-top
    Example: use LPL._SSs_set([[32,33],[75,100]]) to prepare to make a new step pulse with a flat first step and a
    15%-gradient second step and a 3x ratio between the maxima of the two steps
    Accepts either a flat, even-length list of heights or a list of [start, stop] pairs
    Returns False on validation or write failure; returns None on success
    """
    shape_rank = len(np.array(NewSSsArr).shape)
    if shape_rank == 1:
        # Flat list: must contain an even number of entries (pairs of heights)
        if len(NewSSsArr) % 2 != 0:
            print('Failed to write new SSs values! SSs length: {}'.format(len(NewSSsArr)))
            return False
        try:
            GLOBAL.SSS.set(NewSSsArr)
            return
        except Exception:  # narrowed from bare except
            print('Failed to write new SSs values! SSs length: {}'.format(len(NewSSsArr)))
            return False
    elif shape_rank == 2:
        try:
            # Flatten the list of pairs before writing to the PV
            GLOBAL.SSS.set([val for SSpair in NewSSsArr for val in SSpair])
            return
        except Exception:  # narrowed from bare except
            print('Parsing failure! Failed to write new SSs values!')
            return False
    else:
        print('Unexpected shape! Failed to write new SSs values!')
        return False
@classmethod
def _YSSs_get(cls):
    """
    Return the current value of YSSs, which is an array of pairs of pulse segments heights as YFE diode amplitude
    These parameters are used as shorthand in _EW2()
    """
    # YSSs is stored flat in GLOBAL.YSSS; regroup into [start, stop] pairs, one per Psns segment
    return [list(GLOBAL.YSSS.get()[2*ii:2*ii+2]) for ii in range(len(cls._Psns_get()))]
def _YSSs_set(NewYSSsArr):
    """
    Sets the current value of YSSs, which is an array of pairs of pulse segments heights as YFE diode amplitude
    YSSs must have one pair of heights for every duration segment in the segment durations parameter Psns
    This function can be used in specifying the YFE goal segment heights
    Accepts either a flat, even-length list of heights or a list of [start, stop] pairs
    Returns False on validation or write failure; returns None on success
    """
    shape_rank = len(np.array(NewYSSsArr).shape)
    if shape_rank == 1:
        # Flat list: must contain an even number of entries (pairs of heights)
        if len(NewYSSsArr) % 2 != 0:
            print('Failed to write new YSSs values! YSSs length: {}'.format(len(NewYSSsArr)))
            return False
        try:
            GLOBAL.YSSS.set(NewYSSsArr)
            return
        except Exception:  # narrowed from bare except
            print('Failed to write new YSSs values! YSSs length: {}'.format(len(NewYSSsArr)))
            return False
    elif shape_rank == 2:
        try:
            # Flatten the list of pairs before writing to the PV
            GLOBAL.YSSS.set([val for YSSpair in NewYSSsArr for val in YSSpair])
            return
        except Exception:  # narrowed from bare except
            print('Parsing failure! Failed to write new YSSs values!')
            return False
    else:
        print('Unexpected shape! Failed to write new YSSs values!')
        return False
@classmethod
def _wIter2(cls,sQ,wQ,DurationListQ,StartStopListQ,zzJQ,mapnowQ,stepQQ,Hdisplay=False):
    """Calculates next suggested AWG input given 1) a previous full-energy waveform (+ mapping) and its corresponding AWG input,
    2) the Duration and Start/Stop lists to specify the goal, and 3) the requested step size of the correction"""
    # Fixed Gaussian-window parameters used when projecting the scope trace to the AWG base
    avgfwhm=90;avgrange=11;#250;
    DurListQ=np.cumsum([0]+DurationListQ)
    w1,w2=0,int(DurListQ[-1]*4)+5 # 50-5, 50+int(DurListQ[-1]*4)+5
    PGQ=cls._PulseGoal(DurationListQ,StartStopListQ)
    # Scope traces arrive with ~10000 or ~1000 samples; scale the _PulseMax amplitude accordingly
    if np.abs(len(sQ)-10000)<10:
        PMQcorr=1
    elif np.abs(len(sQ)-1000)<10:
        PMQcorr=10
    else:
        print('Warning: unanticipated pulse shape array length of '+str(len(sQ))+', _PulseMax scaling may be off...')
        PMQcorr=1
    PMQ=cls._PulseMax(DurationListQ,StartStopListQ,zzJQ)*PMQcorr
    # Project the measurement, step it toward the goal, then re-pin the edge pixels
    wnew2=cls._FixEdges(cls._UpdatingShapingAlgorithm(PGQ,cls._TraceFormatting(sQ,mapnowQ,PMQ,AvgRange=avgrange,FWHM=avgfwhm), wQ,stepQQ),DurationListQ,StartStopListQ)
    if Hdisplay == True:
        # Diagnostic overlay of new input, delta, projected measurement, and goal
        ep.ll([0.*np.array(wnew2[w1:w2]),np.array(wnew2[w1:w2])-np.array(wQ[w1:w2]),np.array(cls._TraceFormatting(sQ,mapnowQ,PMQ,AvgRange=avgrange,FWHM=avgfwhm))[w1:w2]*.6,np.array(PGQ)[w1:w2]*.6])
    # NOTE(review): original indentation was lost in extraction; this power-vs-time
    # plot may have belonged inside the Hdisplay guard above -- confirm against source
    ep.llxy([cls._weichToPowerVsTime(sQ),cls._PGToPowerVsTime(Psns=DurationListQ, SSs=StartStopListQ, zzJQ=zzJQ)],
            xlb='Time (ns)',ylb='Power (W)',
            xlim=[-1,1+np.sum(DurationListQ)-0.25*np.sum([1 if StartStopListQ[ii][1] == StartStopListQ[ii][0] else 0 for ii in range(len(StartStopListQ)-1)])])
    return wnew2
def _weichall():
"""
Generates weighted waveforms for YFE, 1in1w, 4x2in1w, and 4x2in2w outputs using scopes and energy meters
Returns an array of energy-weighted waveforms in the order listed above (with the 2in heads in order AB, EF, GH, IJ)
"""
try:
LAchall=LOSC('a').rchall();LBchall=LOSC('b').rchall();L2chall=LOSC('2').rchall();
allwvfm=[*LAchall[:2],*LBchall,*L2chall];
allenergy=[*EMeters.EG1wYFE1in(),*EMeters.EG1w2in()[0],*EMeters.EG()[0][0]]
allweich=[]
for ii in range(10):
templistQ=allwvfm[ii]
bkgrdbuffer=int(0.038*len(templistQ))
bkgrdQ=np.mean(templistQ[:bkgrdbuffer])
ensampQ=allenergy[ii]
weightQ=ensampQ/np.sum(np.array(templistQ)-bkgrdQ)
allweich.append(np.array(weightQ*(np.array(templistQ)-bkgrdQ)))
return allweich
except:
print('Weighted waveform generation failed!')
return False
def _weichToPowerVsTime(weiarr):
"""
Converts energy-weighted scope channels into a tuple of (time_values, instantaneous_power_in_watts)
(Assumes a fixed time window of 50ns)
"""
return (np.linspace(-5,45,len(weiarr)), np.array(weiarr)/(50e-9/len(weiarr)))
@classmethod
def _PGToPowerVsTime(cls, Psns, SSs, zzJQ):
"""
Converts energy-weighted _PulseGoal into a tuple of (time_values, instantaneous_power_in_watts)
(time window chosen to match the same 50ns window of diagnostic fast photodiodes + oscilloscopes)
Accepts pulse-specifying input parameters Psns (pulse duration segments in ns), SSs (pulse segment start/stop heights in %),
and overal pulse energy zzJQ
"""
PGlistwvfm=[0]*20 + list(cls._PulseGoal(DurationListQ=Psns,StartStopListQ=SSs)) + [0]*40
PGwvfm = np.array(PGlistwvfm) * (zzJQ)/(np.sum(PGlistwvfm)) / (50e-9 / len(PGlistwvfm))
return (np.linspace(-5,45,len(PGwvfm)),PGwvfm)
def _pshostcheck():
"""
This warns users if they're trying to do sg that requires the use specific hosts (e.g. to reach the ICS subnet)
List of approved hosts can be found in GLOBAL.OKHOSTS
"""
try:
hostname=socket.gethostname()
if hostname not in GLOBAL.OKHOSTS:
print('Host must be one of the following:')
for eahost in GLOBAL.OKHOSTS:
print(eahost)
print('Current host: '+hostname)
raise Exception
except Exception:
print('EXIT')
os._exit(1)
try:
curruser=getpass.getuser()
if curruser not in GLOBAL.OKUSERS:
print('Warning: you are logged in as '+curruser+'. Beware of permissions issues... You may even unwittingly cause some yourself!')
print('Suggested users:')
for eauser in GLOBAL.OKUSERS:
print(eauser)
except:
print('Failed: could not ID current user!')
return
def _DateString():
"""Shorthand way of getting the current date in format YYYYMMDD"""
qdate=date.today()
return qdate.strftime('%Y%m%d')
def get_curr_exp(timeout=15, hutch_name='mec'):
"""Returns the name of the current experiment running in MEC and adds it to notepad PV GLOBAL.CurrExp"""
script=pcdsdaq.ext_scripts.SCRIPTS.format(hutch_name,'get_curr_exp')
exp=pcdsdaq.ext_scripts.cache_script(script,timeout=timeout)
curr_exp=exp.lower().strip('\n')
try:
GLOBAL.CurrExp.put(curr_exp)
except:
try:
GLOBAL.CurrExp.put(hutch_name+'xx####')
except:
print('Failed to write current experiment to notepad PV!')
return curr_exp
def get_curr_run(timeout=15, hutch_name='mec'):
"""Returns the current run number in MEC and adds it to notepad PV GLOBAL.CurrRun"""
try:
curr_run=pcdsdaq.ext_scripts.get_run_number(hutch=hutch_name,timeout=10)
except:
print('Failed to retrieve run number, setting to 9000!')
curr_run=9000
try:
GLOBAL.CurrRun.put(int(curr_run))
except:
print('Failed to write run number to notepad PV!')
return curr_run
def get_curr_shape(display=True):
"""Returns tuple of the name of the last loaded pulse and the last time it was loaded or refreshed (as a str in '%H%M%S.%Y%m%d' format)"""
try:
curr_shape=GLOBAL.CurrShape.get()
shape_time=str(GLOBAL.CurrShapeLoadTime.get())#%H%M%S.%Y%m%d
if display:
print('Last loaded pulse shape: {}'.format(curr_shape))
print('Last loaded or refreshed: {}:{}:{} on {}{}-{}-{}'.format(*re.findall('..', shape_time.split('.')[0]),*re.findall('..', shape_time.split('.')[1])))
return (curr_shape, shape_time)
except:
print('Failed to retrieve current shape!')
return False
@classmethod
def _psheaders(cls):
"""Checks to see if shotlog files exist for the day, returns the date string and current run number"""
cls._pshostcheck()
DateStr=cls._DateString()
for head in ['w','y','s1in1w','s42in1w','s42in2w','s']:
if not os.path.exists(GLOBAL.PSFILEPATH+head+DateStr+'.p'):
if head == 'w':
print('No laser file found -- probably the first shot of the day.')
try:
efc.pickledump2([],GLOBAL.PSFILEPATH+head+DateStr+'.p')
except:
print('Could not create file {}!'.format(GLOBAL.PSFILEPATH+head+DateStr+'.p'))
curr_run=cls.get_curr_run()
return (DateStr, curr_run)
@classmethod
def _psacqx(cls, save_flag=True, display=False):#RunNumQQ=False,
"""
Acquisition sequence after shooting the LPL, primary component of psposthot()
(pspostshot() is the preferred function to use before taking a shot)
Actions taken include preparing save folders, gathering shot data, preparing mini shot report, saving data to file, etc.
save_flag=True means that the shot data will be saved to the eLog of the current experiment (in addition to internal laser records)
save_flag=False means that the shot data will be saved to internal laser records only, NOT to user eLog
display=True means that the acquired shot's energy-weighted, combined 2in2w waveform will be plotted as power vs. time
display=False means that no waveform plot will be generated upon execution of the function
"""
(DateStr, curr_run) = cls._psheaders()
psfpQ=GLOBAL.PSFILEPATH
RunNumStr=str(curr_run).zfill(4)
RunName='run'+str(RunNumStr)+'_'
headlist=['AB','EF','GH','IJ']
#get the current experiment name
try:
ExpName=cls.get_curr_exp()
except:
ExpName=GLOBAL.CurrExp.get()
#check for folders for saving scope data; if they don't exist, it makes them
fpQ='/reg/neh/operator/mecopr/experiments/'+ExpName+'/lecroy/'
if not os.path.exists(fpQ[-7]):
print('File path '+fpQ[-7]+' does not exist! Trying to create it...')
try:
os.makedirs(fpQ[-7]);print('Folder created successfully!');
os.chmod(fpQ[-7],stat.S_IRWXU|stat.S_IRWXG|stat.S_IRWXO);
except:
print('Failed to create '+fpQ[-7]+'!')
if not os.path.exists(fpQ):
print('File path '+fpQ+' does not exist! Trying to create it...')
try:
os.makedirs(fpQ);print('Folder created successfully!');
os.chmod(fpQ,stat.S_IRWXU|stat.S_IRWXG|stat.S_IRWXO);
except:
print('Failed to create '+fpQ+'!')
#check which heads are enabled
for ii in range(4):
if PFN.HeadENB()[ii]:
RunName=RunName+headlist[ii]
weiarr=cls._weichall()#fix to give np.zeros(5002) when broken
total_print=''
#check so don't overwrite if someone forgets to change run number
wtoday=pickle.load(open(psfpQ+'w'+DateStr+'.p','rb'))
if os.path.exists(fpQ+RunName+'energies.txt'):
RunName=str(RunName+'DUPLICATE')
print(str('This run number already exists; run name '+RunName+' used'))
total_print+=str('This run number already exists; run name '+RunName+' used')
total_print+='\n'
else:
print(str('Run name: '+RunName+', shot number: '+str(len(wtoday)+1)))
total_print+=str('Run name: '+RunName+', shot number: '+str(len(wtoday)+1))
total_print+='\n'
print(datetime.now().strftime('%A, %d. %B %Y %I:%M%p'))
total_print+=str(datetime.now().strftime('%A, %d. %B %Y %I:%M%p'))
total_print+='\n'
if not save_flag:
print('This is a test run. Use save_flag=True if you want to save data.')
if save_flag:
for ii in range(4):
np.savetxt(str(fpQ+RunName+'ch'+str(ii)+'.txt'),cls._weichToPowerVsTime(PFN.HeadENB()[ii]*weiarr[-4+ii]))
WeightedSum=np.sum(np.expand_dims(PFN.HeadENB(),axis=1)*weiarr[-4:],axis=0)
if save_flag:
np.savetxt(str(fpQ+RunName+'chsum.txt'),cls._weichToPowerVsTime(WeightedSum))
PulseEnergies=[GLOBAL.EAB2w.get(),GLOBAL.EEF2w.get(),GLOBAL.EGH2w.get(),GLOBAL.EIJ2w.get()]
EnMess='***'
for ii in range(len(PulseEnergies)):
EnMess+=' {}: '.format(headlist[ii])
EnMess+=str(PulseEnergies[ii])
EnMess+=' J ***'
EnMess+=str(' total: '+str(np.sum(PulseEnergies))+' J ***')
#print(EnMess)
total_print+=EnMess
total_print+='\n'#start over...
total_print='Run: '+str(curr_run)+'\n'
wppvlist=[GLOBAL.HWPAB, GLOBAL.HWPEF, GLOBAL.HWPGH, GLOBAL.HWPIJ];
headstr='';wpstr='';
for ii in range(4):
if PFN.HeadENB()[ii]:
headstr+=headlist[ii]
wpstr=wpstr+headlist[ii]+': '+str(round(wppvlist[ii].get(),3))+', '
wpen=np.mean([PFN.HeadENB()[ii]*np.cos((np.pi/180)*2*wppvlist[ii].get())**2 for ii in range(4)])
total_print+=str('The following heads are enabled: '+headstr+'\n')
total_print+=str('The waveplate settings are: '+wpstr[:-2]+'\n')
total_print+=str('This is ~'+str(round(100*wpen,3))+'% of max energy.'+'\n')
total_print+=EMeters.EGall(return_txt=True);
old_energies = pickle.load(open(GLOBAL.PSFILEPATH+'preshot_energies.p','rb'))
new_energies = EMeters.EGall(return_energy_only=True);
energy_warning = False
for ii in range(len(old_energies)):
if new_energies[ii] == old_energies[ii]:
if new_energies[ii] != 0:
energy_warning = True
if energy_warning:
total_print+=efc.cstr('Caution: at least one non-zero pulse energy did not update on the previous shot!','BRY')
if save_flag:
np.savetxt(str(fpQ+RunName+'energies.txt'),PulseEnergies)
ep.lxysav(*cls._weichToPowerVsTime(WeightedSum),str(fpQ+RunName+'_'+str(int(round(np.sum(PulseEnergies))))+'J'),
abs_path=True,xlb='Time (ns)',ylb='Power (W)')
fileheads=['w','y','s1in1w','s42in1w','s42in2w','s'];
prepickle=[HAWG().ReadPulseHeights(),weiarr[0],weiarr[1],weiarr[2:6],weiarr[6:10],WeightedSum]
Psns=cls._Psns_get()#pickle.load(open(psfpQ+'Psns.p','rb'))
SSs=cls._SSs_get()#pickle.load(open(psfpQ+'SSs.p','rb'))
if display:
ep.llxy([cls._weichToPowerVsTime(WeightedSum),cls._PGToPowerVsTime(Psns=Psns, SSs=SSs, zzJQ=np.sum(PulseEnergies))],
xlb='Time (ns)',ylb='Power (W)',
xlim=[-1,1+np.sum(Psns)-0.25*np.sum([1 if SSs[ii][1] == SSs[ii][0] else 0 for ii in range(len(SSs)-1)])])
GLOBAL.WVFMHAWG.put(prepickle[0])
GLOBAL.WVFMYFE.put(LPL._TraceFormatting(prepickle[1],GLOBAL.LMapAB,LPL._PulseMax(Psns,SSs,np.sum(prepickle[1]))*10,AvgRange=1,FWHM=1))
GLOBAL.WVFM1IN1w.put(LPL._TraceFormatting(prepickle[2],GLOBAL.LMapAB,LPL._PulseMax(Psns,SSs,np.sum(prepickle[2]))*10,AvgRange=1,FWHM=1))
GLOBAL.WVFM2IN1w.put(LPL._TraceFormatting(np.sum(prepickle[3],axis=0),GLOBAL.LMapAB,LPL._PulseMax(Psns,SSs,np.sum(prepickle[3]))*10,AvgRange=1,FWHM=1))
GLOBAL.WVFM2IN2w.put(LPL._TraceFormatting(prepickle[5],GLOBAL.LMap2,LPL._PulseMax(Psns,SSs,np.sum(prepickle[5]))*1,AvgRange=1,FWHM=1))
for ii in range(len(fileheads)):
templist=pickle.load(open(psfpQ+fileheads[ii]+DateStr+'.p','rb'))
templist.append(prepickle[ii])
efc.pickledump2(templist,psfpQ+fileheads[ii]+DateStr+'.p')
if save_flag:
mecel = elog.ELog({'experiment':ExpName},user='mecopr',pw=pickle.load(open(psfpQ+'elogauth.p','rb')))
try:
mecel.post(total_print,attachments=[str(fpQ+RunName+'_'+str(int(round(np.sum(PulseEnergies))))+'J.png')],run=curr_run,tags=['laser'])
print('Auto-saved to eLog with run '+str(curr_run))
except:
try:
mecel.post(total_print,attachments=[str(fpQ+RunName+'_'+str(int(round(np.sum(PulseEnergies))))+'J.png')],tags=['laser'])
print('Auto-saved to eLog')
except:
print('Failed to auto-save to eLog')
@classmethod
def _psefc(cls,JreqQ=0,AQQ=0.0):
"""
Looks at last combined 2in2w waveform and calculates new suggested update to Highland AWG to step towards _PulseGoal
Also generates plot based on single-shot full-energy output pulse
(function not used much for shaping anymore since we predominantly shape at 10Hz now using the function psefc10Hz)
JreqQ is the energy scaling for the goal waveform (found using GLOBAL.PSNS and GLOBAL.SSS), used for calculating feedback and for plotting
AQQ is the step size used in the feedback calculation; leave set to 0 unless you really know what you are doing (number should be ~<0.1)!
"""
(DateStr, curr_run) = cls._psheaders()
psfpQ=GLOBAL.PSFILEPATH
Psns=cls._Psns_get()#pickle.load(open(psfpQ+'Psns.p','rb'))
SSs=cls._SSs_get()#pickle.load(open(psfpQ+'SSs.p','rb'))
Jscale=1
PulseEnergies=[GLOBAL.EAB2w.get(),GLOBAL.EEF2w.get(),GLOBAL.EGH2w.get(),GLOBAL.EIJ2w.get()]
wtoday=pickle.load(open(psfpQ+'w'+DateStr+'.p','rb'))
stoday=pickle.load(open(psfpQ+'s'+DateStr+'.p','rb'))
if int(JreqQ) < 5:
Jreq=np.sum(PulseEnergies)*Jscale #65
else:
Jreq=JreqQ
if Jreq>42:
BumpQ=2#3 or 4
else:
BumpQ=1.5
if np.abs(len(stoday[-1])-10000)<10:
bkgrdbuffer=380
elif np.abs(len(stoday[-1])-1000)<10:
bkgrdbuffer=38
else:
print('Warning: unanticipated pulse shape array length of '+str(len(stoday[-1]))+', bkgrd subtraction may be off...')
bkgrdbuffer=1
try:#this is to check for bad scope traces; rewrite this later
if np.sum(abs(stoday[-1])[:bkgrdbuffer]) > 1:
BumpQ=0
print('To whom it may concern: \n The next shot will not include an update to the pulse shaper because the saved scope trace seems abnormal. \n Did you switch to 10Hz and clear the scope traces before you saved them? or maybe did you disable or enable some amplifier arms before you saved the scope traces? or maybe you accidentally told the script to take more than 1 shot? \n If you answered yes to any of these questions, please don\'t do that again. (If you did something else out of the ordinary that could be linked to this anomaly, please let someone know.) \n\n Sincerely, \nThe Laser :)')
except:
pass
AQ=AQQ*BumpQ#.03,.035,.05 seems too big most of the time
if len(wtoday) > 0:
if len(stoday[-1]) == 10002:
mapnow=GLOBAL.LMap2#[50,1000]
elif len(stoday[-1]) == 1002:
mapnow=GLOBAL.LMapAB#[5,100]
else:
print('Unanticipated pulse shot array length: '+str(len(stoday[-1])));
print('Aborting...');return
wupd=cls._wIter2(stoday[-1],np.array(wtoday[-1])/28000.,Psns,SSs,Jreq,mapnow,AQ)
else:
print('No shots yet today; beginning with pre-loaded shape')
try:
wupd=HAWG().ReadPulseHeights()
except:
print('Error! HAWG')
return wupd
##EXECUTE THIS FILE FIRST TO DETERMINE THE UPDATED WAVEFORM
    @classmethod
    def psefc10Hz(cls,pwt='curr',numIterQ=50,AQQ=0.03,displayPlot=True,reloopPrompt=True,YFEbkgrdY=-.004,PtNumFront=3,PtNumBack=2,CorrFactorFront=.97,CorrFactorBack=1.0,avgfwhm=9,avgrange=1):
        """
        Looks at last 10Hz YFE waveform, calculates new suggested update to Highland AWG to step towards pulse waveform target pwt
        Also generates side-by-side plots of current YFE waveform & pwt (left) and of residual difference between the two (right)
        Use the left plot for monitoring performance, use the right plot to watch for convergence/etc.
        (This function is the underlying workhorse of the psrefrwvfm function)
        pwt is the pulse waveform target, most often _ExponentialWave2(...) or the like
        If pwt is zero then it is assumed that pwt is provided by _EW2(_Psns_get(), _SSs_get(), _YSSs_get(), offsetQ=0)
        numIterQ is the number of iterations used in the convergence loop
        AQQ is the step size used in each iteration of the convergence loop; value should be kept <<1 (usu. 0.03-0.2)
        displayPlot allows one to suppress the plots described above (=False) or allow them to be displayed (=True)
        reloopPrompt allows one the option of adding 50 more iterations after numIterQ has been reached (=True) or not (=False)
        YFEbkgrdY applies an offset of the YFE diode trace vs pwt; use if noise floor of diode trace does not line up with pwt background
        CorrFactorFront,CorrFactorBack,PtNumFront,PtNumBack are all useful if one desires to use LPL._FixEdges while converging on pwt
        """
        #avgfwhm=9;avgrange=1;#25;
        # set the LPL slicer EVR to event code 43 before shaping
        # NOTE(review): presumably EC 43 is the 10Hz code -- confirm
        if GLOBAL.EVRLPLSSEC.get() != 43:
            GLOBAL.EVRLPLSSEN.put(0);time.sleep(0.5);
            GLOBAL.EVRLPLSSEC.put(43);time.sleep(0.5);
        if GLOBAL.EVRLPLSSEN.get() != 1:
            print('Pulse slicer not enabled! Enable now? [y/n]')
            checkprompt=efc.getch_with_TO(TOsec=10,display=False)
            if checkprompt in ('y','Y'):
                GLOBAL.EVRLPLSSEN.put(1);time.sleep(0.5);
            else:
                print('Please try again later then!')
                return False
        # bias dither (MBC mode 0) causes energy/shape fluctuations while shaping,
        # so offer to disable it before starting the loop
        if GLOBAL.MBCmode.get() == 0:
            print('Bias dither enabled! Disable now? [y/n]')
            checkprompt=efc.getch_with_TO(TOsec=10,display=False)
            if checkprompt in ('y','Y'):
                GLOBAL.MBCmode.put(1);time.sleep(0.5);
            else:
                print('No? Enjoy your energy and shape fluctuations then, I guess... :/')
        Psns=cls._Psns_get()#pickle.load(open(psfpQ+'Psns.p','rb'))
        SSs=cls._SSs_get()#pickle.load(open(psfpQ+'SSs.p','rb'))
        if pwt == 'curr': #if pwt=0 then assume pwt specified by _EW2
            pwt = cls._EW2(Psns=cls._Psns_get(), SSs=cls._SSs_get(), YSSs=cls._YSSs_get(), offsetQ=0)
        # format the goal trace onto the AWG grid and publish it for live display
        pwtF=np.array(cls._TraceFormatting(pwt,GLOBAL.pwttfmap,1,AvgRange=1,FWHM=1))
        GLOBAL.WVFMYFEGOAL.put(pwtF)
        try:
            # open scope (LeCroy A) and Highland AWG handles used by the loop
            SLA=LOSC('A');efc.ssleep();SLA._Open();efc.ssleep();SH=HAWG();efc.ssleep();SH._Open();efc.ssleep();#replaced w/LeCroyA
            tempwv=SH._ReadPulseHeights();efc.ssleep();
            # zero the AWG so a scope background can be measured
            SH._WritePulseHeights(140*[0]);efc.ssleep();
            bkgrdnum=20;
            iiter=0;
            # wait (up to 20 tries) for the zeroed waveform to show up on the scope
            while (np.sum(SLA._rch(1)) > 1) and iiter < 20:
                print('Warning: looks like the background did not clear!!')
                iiter+=1
                time.sleep(.1);
            # note: the YFEbkgrdY argument is overwritten here with a measured
            # background trace averaged over bkgrdnum acquisitions
            YFEbkgrdY=0*np.array(SLA._rch(1));efc.ssleep();
            print('Acquiring background...')
            for ii in range(bkgrdnum):
                YFEbkgrdY += (np.array(SLA._rch(1))/bkgrdnum); time.sleep(0.1);
            # restore the original waveform and take a first reference trace
            SH._WritePulseHeights(tempwv);time.sleep(1);
            ops00=SLA._rch(1)-YFEbkgrdY;time.sleep(0.1);
            print('..')
            # meanerr tracks the normalized residual between goal and measurement
            meanerr=[]
            meanerr.append(np.sum(np.abs(pwtF[:26]-cls._TraceFormatting(ops00,GLOBAL.LMapAB,1,AvgRange=avgrange,FWHM=avgfwhm)[:26])/(pwtF[:26]+1e-3))/len(pwtF[:26]))
            ops00F=cls._TraceFormatting(ops00,GLOBAL.LMapAB,1,AvgRange=avgrange,FWHM=avgfwhm)
            print('Start loop')
            if displayPlot:
                plt.ion()
                fig,axs=plt.subplots(1,2,figsize=(10,5),gridspec_kw={'hspace':0.4,'wspace':0.2});
                #xdat=[[startposlist[ii]+alphlist[ii]*(-.1+.02*(jj)) for jj in range(11)] for ii in range(4)]
                xdat=np.linspace(.25,35,140)
                #ydat=[[0]*11 for ii in range(4)]
                ax1,=axs[0].plot(xdat,ops00F); axs[0].set_xlabel('curr vs goal'); plt.pause(0.01); axs[0].plot(xdat,pwtF); plt.pause(0.01);
                ax2,=axs[1].plot(list(range(len(meanerr))),meanerr); axs[1].set_xlabel('mean error'); plt.pause(0.01);
                axss=[ax1,ax2]
                #xdat=[]
            LoopIsDone=False
            while not LoopIsDone:
                for ii in range(numIterQ):
                    if (ii+1)%50 == 0:
                        print(str('Iter:'+str(ii+1)))
                    ops0=SLA._rch(1)-YFEbkgrdY;time.sleep(0.025);####added.215 when 200mV/div instead of 100mV/div
                    # skip feedback when the scope returned the identical trace
                    # (i.e. no new acquisition since last read)
                    if all(ops0 == ops00):
                        print('No scope update detected... no feedback applied!')
                    else:
                        rph=SH._ReadPulseHeights();time.sleep(0.025);
                        #pwtF=np.array(TraceFormatting2(pwt,[25,500],1))
                        ##ops0F=TraceFormatting(ops0,[25,500],1)
                        ops0F=cls._TraceFormatting(ops0,GLOBAL.LMapAB,1,AvgRange=avgrange,FWHM=avgfwhm)
                        #epll([pwtF,ops0F])
                        meanerr.append(np.sum(np.abs(pwtF[:26]-ops0F[:26])/(pwtF[:26]+1e-3))/len(pwtF[:26]));
                        if displayPlot:
                            axss[0].set_data(xdat,ops0F); axs[0].relim(); axs[0].autoscale_view(True,True,True);
                            axss[1].set_data(list(range(len(meanerr))),meanerr); axs[1].relim(); axs[1].autoscale_view(True,True,True);
                            fig.canvas.draw_idle(); plt.pause(0.01);
                        # one feedback step, then edge cleanup, then write back to the AWG
                        usa0=cls._UpdatingShapingAlgorithm(pwtF,ops0F,np.array(rph)/28000.,AQQ)#.075#.25
                        usa0FE=cls._FixEdges(usa0,Psns,SSs,CorrFactorFront=CorrFactorFront,CorrFactorBack=CorrFactorBack,PtNumFront=PtNumFront,PtNumBack=PtNumBack)
                        #usa0FE=_FixEdges(usa0,[3,4.25],[[.98*100/8.0,100/8.0],[98,100]])
                        #epll([rph,usa0FE*28000.])
                        SH._WritePulseHeights(usa0FE*28000.);time.sleep(0.05);
                    ops00=ops0[:]
#                if displayPlot:
#                    epll([pwtF,ops0F])
#                    epl(meanerr)
                ######check and aim
                if reloopPrompt:
                    print('Would you like to try another 50 iterations? [enter y/n]',end='',flush=True)
                    checkprompt=efc.getch_with_TO(TOsec=30,display=False);
                    if checkprompt in ('y','Y'):
                        numIterQ=50
                        # an answer like 'y120' requests a specific (capped) iteration count
                        if len(checkprompt)>1:
                            try:
                                numIterQ=int(checkprompt[1:]) if int(checkprompt[1:]) < 250 else 250
                            except:
                                print('Could not parse loop # instruction -- using 50 instead.')
                        print('Performing {} more iterations!'.format(str(numIterQ)))
                    else:
                        LoopIsDone=True
                else:
                    LoopIsDone=True
            SH._Close();efc.ssleep();SLA._Close();efc.ssleep();
        except:
            print('Failed')
            # NOTE(review): if the failure happened before SLA/SH were opened,
            # these names are unbound and this cleanup itself raises NameError
            SH._Close();efc.ssleep();SLA._Close();efc.ssleep();
        # record the refresh time so get_curr_shape() reports it
        GLOBAL.CurrShapeLoadTime.put(float(datetime.now().strftime('%H%M%S.%Y%m%d')))
        if displayPlot:
            plt.ioff()
@classmethod
def _psupd(cls,newwavQ):
"""
Shortcut to updating the Highland AWG using the provided input waveform newwavQ
"""
cls._pshostcheck()
wupdt=newwavQ[:]
if max(wupdt) < 1.5:
wupdt=28000.0*np.array(wupdt)
try:
HAWG().WritePulseHeights(wupdt);
except:
print('Error, check HAWG!')
    @classmethod
    def psloadwvfm(cls,RecipeStrQ,WvGoal10HzHint=False):
        """
        Loads a new waveform according to previously-saved recipe RecipeStrQ
        Options for RecipeStrQ can be found using the LPL.psrecipes() command
        (LPL.psmenu() allows you to choose a recipe to load without needing to type in the full name of it)
        WvGoal10HzHint=True means that you will see the YFE waveform hint needed to make the pulse
            and upon which the execution of LPL.psrefrwvfm() would be based; usually based on _ExponentialWave2
        WvGoal10HzHint=False means that the YFE waveform hint will not be printed to terminal
        """
        cls._pshostcheck()
        print('Loading timestamp: '+datetime.now().strftime('%A, %d. %B %Y %I:%M:%S%p'))
        try:
            # recipe pickle holds [Psns, SSs, three YFE currents, AWG waveform, hint string]
            [Psns,SSs,YFE02mmCurr,YFE06mmCurr,YFE10mmCurr,NewWvfm,WvGoal10Hz] = pickle.load(open(GLOBAL.PSFILEPATH+'recipes/load'+RecipeStrQ+'.p','rb'))
        except:
            print('Recipe file '+GLOBAL.PSFILEPATH+'recipes/load'+RecipeStrQ+'.p\' not found.')
            return
        # persist and publish the new segment durations and start/stop heights
        efc.pickledump2(Psns,GLOBAL.PSFILEPATH+'Psns.p')
        cls._Psns_set(Psns)
        efc.pickledump2(SSs,GLOBAL.PSFILEPATH+'SSs.p')
        cls._SSs_set([elem for sublist in SSs for elem in sublist])
        # restore the recipe's YFE eDrive currents for the 2/6/10mm stages
        YFE.Set(2,YFE02mmCurr);time.sleep(.15);
        YFE.Set(6,YFE06mmCurr);time.sleep(.15);
        YFE.Set(10,YFE10mmCurr);time.sleep(.15);
        if WvGoal10HzHint:
            if len(WvGoal10Hz) > 5:
                print('Hint for 10Hz waveform: '+WvGoal10Hz)
            else:
                print('No hint for 10Hz waveform available. MSG: \''+WvGoal10Hz+'\'')
        # rebuild the YFE goal trace by parsing any _ExponentialWave2(...) /
        # _LinearWave2(...) calls recorded in the recipe's hint string
        pseparams=np.array(re.findall('ExponentialWave2\((\d+),(\d+\.\d+|\.\d+),(\d+),(\d+\.\d+|\.\d+),0,5002\)',WvGoal10Hz),dtype=np.float32);
        pslparams=np.array(re.findall('LinearWave2\((\d+),(\d+\.\d+|\.\d+),(\d+),(\d+\.\d+|\.\d+),0,5002\)',WvGoal10Hz),dtype=np.float32);
        # start from a flat zero baseline across the 5002-point trace window
        yfegoal=cls._LinearWave2(500,0,1025,0,0,5002);
        if (len(pslparams) > 0) or (len(pseparams) > 0):
            for ii in range(len(pslparams)):
                yfegoal+=cls._LinearWave2(pslparams[ii][0],pslparams[ii][1],pslparams[ii][2],pslparams[ii][3],0,5002)
            for ii in range(len(pseparams)):
                yfegoal+=cls._ExponentialWave2(pseparams[ii][0],pseparams[ii][1],pseparams[ii][2],pseparams[ii][3],0,5002)
        else:
            print('No wave extracted: '+WvGoal10Hz)
        # NOTE(review): if no ExponentialWave2 terms were parsed, this stores an
        # empty YSSs list -- confirm that downstream _EW2 callers tolerate that
        cls._YSSs_set([[pseparams[ii][1],pseparams[ii][3]] for ii in range(len(pseparams))])
        yfegoalF=np.array(cls._TraceFormatting(yfegoal,GLOBAL.pwttfmap,1,AvgRange=1,FWHM=1))
        GLOBAL.WVFMYFEGOAL.put(yfegoalF)
        GLOBAL.WVFM2IN2wGOAL.put(cls._PulseGoal(Psns,SSs))
        try:
            cls._psupd(NewWvfm)
            GLOBAL.CurrShape.put(RecipeStrQ)
            GLOBAL.CurrShapeLoadTime.put(float(datetime.now().strftime('%H%M%S.%Y%m%d')))
            print('New waveform loaded! ')
        except:
            print('Failed to load new waveform.')
@classmethod
def pssavewvfm(cls,RecipeStrQ=0,PsnsQ=0,SSsQ=0,YFEgetQ=0,TargetwlistDateQ='curr',TargetwindexQ=0,WvGoal10HzHint='none',YFEbkgrdY=-.004):
"""
Saves a new pulse shape recipe using user-provided parameters
if setting RecipeStrQ='0' or = of pattern (^E\d+J) (e.g., 'E40J'): function will guess the apropriate name for the recipe based on PsnsQ and SSsQ
otherwise: set RecipeStrQ equal to the name of the recipe you want
Example: leaving RecipeStrQ blank or using RecipeStrQ=0 results in a name of '10ns00grad' if PsnsQ=[10.25] and SSsQ=[[98,100]]
Example: using RecipeStrQ='E20J' results in a name of '04ns06ns020to10stepE20J' if PsnsQ=[4,6.25] and SSsQ=[[19,20],[98,100]]
Example: using RecipeStrQ='ThanosPulse1337' results in a name of 'ThanosPulse1337'
if setting PsnsQ=0 and SSsQ=0: function will load PsnsQ and SSsQ from GLOBAL.PSNS and GLOBAL.SSS, respectively
otherwise: set PsnsQ and SSsQ equal to the values you would like included in your recipe
Example: leaving PsnsQ and SSsQ blank or setting them equal to 0 results in
PsnsQ=[20.25] and SSsQ=[[85,100]] if GLOBAL.PSNS is [20.25] and GLOBAL.SSS is [[85,100]]
Example: setting PsnsQ=[2.25,2.25,2.25,2.25,2.25] and SSsQ=[[10,15],[15,25],[25,40],[40,80],[80,100]] sets the
values of PsnsQ and SSsQ explicitly without regard for GLOBAL.PSNS and GLOBAL.SSS
if setting YFEgetQ=0: function will read out and save current YFE eDrive currents as part of the recipe
otherwise: these can be saved explicitly, but there is almost never a good reason to do this anymore
if setting TargetwlistDateQ='curr': current Highland AWG waveform shot will be used to make the recipe
otherwise: set TargetwlistDateQ equal to the desired date and TargetwindexQ equal to the desired shot number
Example: TargetwlistDateQ='20220211' and TargetwindexQ=3 would make a recipe using the fourth shot on 2022Feb11
IMPORTANT: please set WvGoal10HzHint equal to the target waveform for the YFE output
Failure to do so will prevent psrefrwvfm() from working later, as it will have no provided target waveform
Example: use WvGoal10HzHint='_ExponentialWave2(500,.1,1025,.8,0,5002)' if _ExponentialWave2(500,.1,1025,.8,0,5002) was
the waveform used in psefc10Hz while you were creating the waveform for your recipe
"""
cls._pshostcheck()
if not isinstance(WvGoal10HzHint, str):
print('Warning: WvGoal10HzHint must be a string. Please put quotes around your shaping hint and try again!')
return False
if PsnsQ == 0:
PsnsQ=cls._Psns_get()#pickle.load(open(psfpQ+'Psns.p','rb'))
if SSsQ == 0:
SSsQ=cls._SSs_get()#pickle.load(open(psfpQ+'SSs.p','rb'))
if WvGoal10HzHint=='none':#try to figure out WvGoal10HzHint from YSSs
YSSsQ=cls._YSSs_get()
WvGoal10HzHint=cls._EW2stringhint(Psns=PsnsQ,SSs=SSsQ,YSSs=YSSsQ,YFEbkgrdY=YFEbkgrdY)
print('Saving timestamp: '+datetime.now().strftime('%A, %d. %B %Y %I:%M:%S%p'))
if TargetwlistDateQ == 'curr':
print('Using current Highland waveform...')
try:
NewWvfmQ=HAWG().ReadPulseHeights();
except:
print('Failed! HAWG');
return False
try:
wlastarr=pickle.load(open(GLOBAL.PSFILEPATH+'w'+cls._DateString()+'.p','rb'))
if wlastarr[-1] == NewWvfmQ:
print('Pulse looks equivalent to the most recent pulse, w'+cls._DateString()+'['+str(len(wlastarr)-1)+']')
WvGoal10HzHint=WvGoal10HzHint+';; w'+cls._DateString()+'['+str(len(wlastarr)-1)+']'
else:
WvGoal10HzHint=WvGoal10HzHint+';; sometime after most recent w'+cls._DateString()+'['+str(len(wlastarr)-1)+']'
except:
print('Failed to load most recent amplified shot.')
else:
wavehistQ=pickle.load(open(GLOBAL.PSFILEPATH+'w'+str(TargetwlistDateQ)+'.p','rb'))
NewWvfmQ=wavehistQ[TargetwindexQ][:]
WvGoal10HzHint=WvGoal10HzHint+', w'+str(TargetwlistDateQ)+'['+str(TargetwindexQ)+']'
if YFEgetQ == 0:
YFEgetQ=YFE.Get(display=False)
[YFE02mmCurrQ,YFE02mmCurrQ,YFE02mmCurrQ,YFE02mmCurrQ,YFE06mmCurrQ,YFE10mmCurrQ]=YFEgetQ
if (RecipeStrQ == 0) or (len(re.findall('(^E\d+J)',RecipeStrQ))>0):#learn formatting for pulse from Psns and SSs
if isinstance(RecipeStrQ, str):
if (RecipeStrQ[0] in ('e','E')):#allows to tag on an energy description, e.g. E20J for a 20J version of a pulse
RecipeStrQend=RecipeStrQ
else:
RecipeStrQend=''
else:
RecipeStrQend=''
RecipeStrQ=''
if len(PsnsQ) == 1:#if only one segment, it'll be ##ns##grad [+any energy description]
RecipeStrQ+='{:02}ns'.format(round(PsnsQ[0]))
gradval=round(SSsQ[0][1]-SSsQ[0][0])
RecipeStrQ+='{:02}grad'.format(0 if gradval < 3 else 99 if gradval>99 else gradval)
elif len(PsnsQ) == 2:#if two segments, it'll be ##ns##ns###to100step [+any grad info] [+any energy description]
RecipeStrQ+='{:02}ns{:02}ns'.format(round(PsnsQ[0]),round(PsnsQ[0]))
RecipeStrQ+='{:03}to100step'.format(round(SSsQ[0][1]))
if ((SSsQ[0][1] - SSsQ[0][0]) > 2) or ((SSsQ[1][1] - SSsQ[1][0]) > 2):
gradval=round(SSsQ[0][1]-SSsQ[0][0])
RecipeStrQ+='{:02}grad'.format(0 if gradval < 3 else 99 if gradval>99 else gradval)
gradval2=round(SSsQ[1][1]-SSsQ[1][0])
RecipeStrQ+='{:02}grad'.format(0 if gradval2 < 3 else 99 if gradval2>99 else gradval2)
else:#otherwise it'll be ##ns###to100ramp [+any energy description]
RecipeStrQ+='{:02}ns'.format(round(np.sum(PsnsQ)-0.25*np.sum([1 if SSsQ[ii][1] == SSsQ[ii][0] else 0 for ii in range(len(SSsQ)-1)])))
RecipeStrQ+='{:03}to100ramp'.format(round(SSsQ[0][0]))
RecipeStrQ+=RecipeStrQend
oldfilefound=True
try:
dummyQ = pickle.load(open(GLOBAL.PSFILEPATH+'recipes/load'+RecipeStrQ+'.p','rb'))
print('Old recipe found with same name: '+GLOBAL.PSFILEPATH+'recipes/load'+RecipeStrQ+'.p')
except:
oldfilefound=False
iiQ=0
while oldfilefound:
iiQ=iiQ+1
try:
pickle.load(open(GLOBAL.PSFILEPATH+'recipes/load'+RecipeStrQ+'_'+str(iiQ).zfill(2)+'.p','rb'));
except:
oldfilefound=False
dummyQ[-1]='**replaced on '+cls._DateString()+'** '+dummyQ[-1]
efc.pickledump2(dummyQ,GLOBAL.PSFILEPATH+'recipes/load'+RecipeStrQ+'_'+str(iiQ).zfill(2)+'.p')
print('Saved old recipe as '+GLOBAL.PSFILEPATH+'recipes/load'+RecipeStrQ+'_'+str(iiQ).zfill(2)+'.p')
try:
efc.pickledump2([PsnsQ,SSsQ,YFE02mmCurrQ,YFE06mmCurrQ,YFE10mmCurrQ,NewWvfmQ,WvGoal10HzHint],GLOBAL.PSFILEPATH+'recipes/load'+RecipeStrQ+'.p')
print('Saved new recipe as '+GLOBAL.PSFILEPATH+'recipes/load'+RecipeStrQ+'.p')
except:
print('Failed to save new recipe.')
    @classmethod
    def psviewwvfm(cls,RecipeStrQ='none',TargetwlistDateQ='curr',TargetwindexQ=0,WvGoal10HzHint=False):
        """
        Displays waveform and parameters that are part of a previously-saved recipe
        (LPL.psmenu() allows you to choose a recipe to view without needing to type in the full name of it)
        Set RecipeStrQ equal to the recipe you want to view, e.g. RecipeStrQ='10ns00grad'
        :if no RecipeStrQ is provided, displayed pulse will be chosen according to TargetwlistDateQ and TargetwindexQ
            if using TargetwlistDateQ='curr' and TargetwindexQ=0 with RecipeStrQ='none' or blank: most recent pulse will be shown
            :otherwise choose the date and shot number of the pulse desired
            :Example: TargetwlistDateQ='20220214' and TargetwindexQ=6 will display the seventh saved pulse from 2022Feb14
        if using WvGoal10HzHint=True: any saved waveform hints will be printed to terminal; setting WvGoal10HzHint=False results in no printout
        :this is useful primarily when hoping to make adjustments to a recipe or to create a completely new recipe
        :i.e. use the hint to make modifications to the returned _ExponentialWave2(...) parameters suitable for your new needs
        """
        cls._pshostcheck()
        if RecipeStrQ == 'none':
            if TargetwlistDateQ == 'curr':
                # walk backwards day-by-day until a day with logged shots is found
                foundlastshot=False
                iidQ=0
                while not foundlastshot:
                    try:
                        wlastarr=pickle.load(open(GLOBAL.PSFILEPATH+'w'+str(int(cls._DateString())-iidQ)+'.p','rb'))
                        slastarr=pickle.load(open(GLOBAL.PSFILEPATH+'s'+str(int(cls._DateString())-iidQ)+'.p','rb'))
                        wlast=wlastarr[-1][:]
                        slast=slastarr[-1][:]
                        print('Retrieving most recent shot: w'+str(int(cls._DateString())-iidQ)+'['+str(len(wlastarr)-1)+']')
                        foundlastshot=True
                    except:
                        iidQ=iidQ+1
            else:
                try:
                    wlastarr=pickle.load(open(GLOBAL.PSFILEPATH+'w'+str(TargetwlistDateQ)+'.p','rb'))
                    slastarr=pickle.load(open(GLOBAL.PSFILEPATH+'s'+str(TargetwlistDateQ)+'.p','rb'))
                    wlast=wlastarr[int(TargetwindexQ)]
                    slast=slastarr[int(TargetwindexQ)]
                except:
                    # NOTE(review): on failure wlast/slast remain undefined and the
                    # plotting below raises NameError -- confirm intended behavior
                    print('Failed to load at given date and index: '+TargetwlistDateQ+', '+str(TargetwindexQ))
            Psns=cls._Psns_get()
            SSs=cls._SSs_get()
            # plot the raw AWG waveform, then the shot vs its goal as power-vs-time
            ep.l(wlast)
            ep.llxy([cls._weichToPowerVsTime(slast), cls._PGToPowerVsTime(Psns=Psns, SSs=SSs, zzJQ=np.sum(slast))],
                    xlb='Time (ns)',ylb='Power (W)',
                    xlim=[-1,1+np.sum(Psns)-0.25*np.sum([1 if SSs[ii][1] == SSs[ii][0] else 0 for ii in range(len(SSs)-1)])])
            return
        else:
            try:
                [Psns,SSs,YFE02mmCurr,YFE06mmCurr,YFE10mmCurr,NewWvfm,WvGoal10Hz] = pickle.load(open(GLOBAL.PSFILEPATH+'recipes/load'+RecipeStrQ+'.p','rb'))
            except:
                print('Recipe file '+GLOBAL.PSFILEPATH+'recipes/load'+RecipeStrQ+'.p\' not found.')
                return False
            print('Retrieved recipe: load'+RecipeStrQ)
            print('Psns: '+str(list(Psns))+', SSs: '+str([list(SSs_sub) for SSs_sub in SSs]))
            print('YFEcurr:: 2mm: '+'{:5.1f}'.format(YFE02mmCurr)+', 6mm: '+'{:5.1f}'.format(YFE06mmCurr)+', 10mm: '+'{:5.1f}'.format(YFE10mmCurr))
            print('Extra info: '+WvGoal10Hz)
            ep.l(NewWvfm)
            try:
                # try to parse a shot reference like 'w20220211[3]' from the tail
                # of the hint string so the matching 2w trace can be displayed
                tempstr=WvGoal10Hz[-18:]
                wstrinx=tempstr.index('w')
                wstrinx0=wstrinx-len(tempstr)
                loaddate=tempstr[wstrinx0+1:wstrinx0+9]
                loadindx=int(tempstr[wstrinx0+10:-1])
                saskarr=pickle.load(open(GLOBAL.PSFILEPATH+'s'+loaddate+'.p','rb'))
                ep.llxy([cls._weichToPowerVsTime(saskarr[loadindx]),cls._PGToPowerVsTime(Psns=Psns, SSs=SSs, zzJQ=np.sum(saskarr[loadindx]))],
                        xlb='Time (ns)',ylb='Power (W)',
                        xlim=[-1,1+np.sum(Psns)-0.25*np.sum([1 if SSs[ii][1] == SSs[ii][0] else 0 for ii in range(len(SSs)-1)])])
                print('Pulse energy was ~{}J'.format(np.sum(saskarr[loadindx])))
            except:
                print('Failed to load 2w waveform for display.')
            return NewWvfm
@classmethod
def psrefrwvfm(cls,RecipeStrQ='latest',numStepsQ=50,stepSizeQ=0.25,YFEbkgrdY=-.004,displayPlot=True,reloopPrompt=True):
    """
    Refreshes 10Hz YFE waveform according to target shape given by previously-saved recipe
    If you do not specify a recipe in RecipeStrQ, it will automatically begin refreshing the shape loaded most recently
    :otherwise: use RecipeStrQ='10ns00grad' to begin refreshing from the
    current waveform towards the YFE goal waveform for the 10ns00grad pulse
    Use numStepsQ to specify the number of 10Hz iterations to take
    Use stepSizeQ to specify the size of the corrective step per iteration; should be <<1 (usu. 0.01-0.3)
    Use displayPlot=True to display the waveform as it converges to the goal (as with psefc10Hz)
    :using displayPlot=False will not show the plot
    Use reloopPrompt=True to give the operator an opportunity to add more iterations after the first numStepsQ have elapsed
    :using reloopPrompt=False will cause the function to terminate as soon as numStepsQ have elapsed
    """
    cls._pshostcheck()
    print('Loading timestamp: '+datetime.now().strftime('%A, %d. %B %Y %I:%M:%S%p'))
    # bail out early if the front end is not emitting -- refreshing would be meaningless
    if not YFE.OnCheck(display=False):
        print('YFE does not appear to be on! Check YFE status first!')
        return False
    #load and extract the pulse target from the desired recipe
    if RecipeStrQ=='latest':#if refreshing most recent wvfm, just use _EW2 and RBV of Psns, SSs, and YSSs
        yfegoal=cls._EW2(Psns=cls._Psns_get(), SSs=cls._SSs_get(), YSSs=cls._YSSs_get())
    else:#if not refreshing most recent wvfm
        try:
            [Psns,SSs,YFE02mmCurr,YFE06mmCurr,YFE10mmCurr,NewWvfm,WvGoal10HzHint] = pickle.load(open(GLOBAL.PSFILEPATH+'recipes/load'+RecipeStrQ+'.p','rb'))
        except:
            print('Recipe file '+GLOBAL.PSFILEPATH+'recipes/load'+RecipeStrQ+'.p\' not found.')
            return
        print('Hint text: '+WvGoal10HzHint)
        # reconstruct the YFE goal waveform from the component parameters embedded in the
        # recipe's free-text hint (ExponentialWave2/LinearWave2 calls recorded at save time)
        pseparams=np.array(re.findall('ExponentialWave2\((\d+),(\d+\.\d+|\.\d+),(\d+),(\d+\.\d+|\.\d+),0,5002\)',WvGoal10HzHint),dtype=np.float32);
        pslparams=np.array(re.findall('LinearWave2\((\d+),(\d+\.\d+|\.\d+),(\d+),(\d+\.\d+|\.\d+),0,5002\)',WvGoal10HzHint),dtype=np.float32);
        try:
            # a recipe may override the background level via a 'YFEbkgrdY=...' note in the hint
            YFEbkgrdY=float(re.findall('YFEbkgrdY\s?=\s?([-+]?\d+\.?\d*|[-+]?\.\d+)',WvGoal10HzHint)[0]);
        except:
            YFEbkgrdY=YFEbkgrdY  # no override present: keep the value passed as argument
        yfegoal=cls._LinearWave2(500,0,1025,0,0,5002);  # start from a flat baseline, then add components
        if (len(pslparams) > 0) or (len(pseparams) > 0):
            for ii in range(len(pslparams)):
                yfegoal+=cls._LinearWave2(pslparams[ii][0],pslparams[ii][1],pslparams[ii][2],pslparams[ii][3],0,5002)
            for ii in range(len(pseparams)):
                yfegoal+=cls._ExponentialWave2(pseparams[ii][0],pseparams[ii][1],pseparams[ii][2],pseparams[ii][3],0,5002)
        else:
            print('No wave extracted: '+WvGoal10HzHint)
            return
    #close the shutters
    #print('NOT closing the shutters... hope you\'re protecting your sample!')
    print('Closing all shutters...')
    TTL_shutter.Toggle('closeall',display=False)#close all the shutters
    time.sleep(4)
    try:#used to make sure shutters re-open even in case of error or KeyboardInterrupt
        #check if laser is on
        if np.sum(YFE.Get(display=False)) < 400:
            if np.sum(YFE.Get(display=False)) < 20:
                print('WARNING: YFE seems to be off... Attempting to turn on YFE...')
                YFE.On(CtrlChk=False);
            else:
                print('WARNING: eDrive currents seem low...')
        #prechk = False
        #turn off bias dither; AUTO is 0; MAN is 1
        if GLOBAL.MBCmode.get() != 1:
            print('Turning off MBC bias dither...',end='',flush=True);
            currbias=GLOBAL.MBCbias.get();
            GLOBAL.MBCmode.put(1);#set bias mode to MAN
            efc.dotsleep(3);
            # nudge the bias and verify it tracks: proves the MBC is alive in MAN mode
            GLOBAL.MBCbias.put(currbias+10);
            print('Testing bias responsivity...',end='',flush=True);
            for ii in range(2):
                time.sleep(1);print('..',end='',flush=True);
            if np.abs(GLOBAL.MBCbias.get() - (currbias+10)) < 3:
                GLOBAL.MBCbias.put(currbias);
                efc.dotsleep(2);
            else:
                print('*')
                print('WARNING: MBC not responding!!')
        #else:
        #    print('MBC is not safe! Resetting the MBC...')#problem is here...
        #    resetMBC();
        #set and enable 10Hz output
        print('Set and enable 10Hz output')
        # event code 43 presumably selects the 10Hz pulse train -- TODO confirm against EVR config
        if GLOBAL.EVRLPLSSEC.get() != 43:
            GLOBAL.EVRLPLSSEN.put(0);time.sleep(0.75);
            GLOBAL.EVRLPLSSEC.put(43);time.sleep(0.75);
        if GLOBAL.EVRLPLSSEN.get() != 1:
            GLOBAL.EVRLPLSSEN.put(1);time.sleep(0.75);
        #run the update code
        print('Refreshing the YFE wavefront...')
        cls.psefc10Hz(pwt=yfegoal,numIterQ=numStepsQ,AQQ=stepSizeQ,displayPlot=displayPlot,reloopPrompt=reloopPrompt,YFEbkgrdY=YFEbkgrdY)
        #reset to single shot on pulse picker
        GLOBAL.EVRLPLSSEC.put(182);time.sleep(0.75);
        GLOBAL.EVRLPLSSEN.put(1);time.sleep(0.75);
        #re-open shutters
        print('Opening all shutters...')
        TTL_shutter.Toggle('openall',display=False);#open all the shutters
        MBC.Reset();
        YFE.SetAll(True,displayQ=False);
    except:#used to make sure shutters re-open even in case of error or KeyboardInterrupt
        # NOTE(review): this failure path restores the EVR and MBC but the shutter
        # re-open is commented out -- operator must verify shutter state after an abort
        #reset to single shot on pulse picker
        GLOBAL.EVRLPLSSEC.put(182);time.sleep(0.75);
        GLOBAL.EVRLPLSSEN.put(1);time.sleep(0.75);
        #re-open shutters
        #print('Opening all shutters...')
        #toggle_TTL_shutter('openall',display=False);#open all the shutters
        MBC.Reset();
        YFE.SetAll(True,displayQ=False);
@classmethod
def psrecipes(cls):
    """
    Returns a sorted list of all previously-saved pulse recipe names
    LPL.psmenu() shows this list but also allows you to load or view a recipe

    Recipe names are the file names under the recipes/ folder with the directory
    prefix and the trailing '.p' stripped; names containing '_' are treated as
    old/archived recipes and excluded.
    """
    prefix=GLOBAL.PSFILEPATH+'recipes/'
    allrec=glob.glob(prefix+'*.p')
    oldrec=set(glob.glob(prefix+'*_*.p'))  # archived recipes carry underscores
    # BUGFIX: was ext[60:-2], a hard-coded slice that assumed the recipes/ prefix is
    # exactly 60 characters; compute the prefix length so names survive a path change
    currec=sorted(fn[len(prefix):-2] for fn in allrec if fn not in oldrec)
    return currec
@classmethod
def psmenu(cls):
    """
    Allows you to select a recipe to load or view

    Prompts the operator (30s timeout) for 'L<n>' to load recipe n, 'V<n>' to view
    recipe n, or anything else to quit. Returns False on an invalid recipe number,
    None otherwise.
    """
    recipelist=cls.psrecipes()
    print('Pulse recipes:')
    for pair in list(zip(list(range(len(recipelist))),recipelist)):
        if pair[0]:  # NOTE(review): index 0 is intentionally(?) not listed -- confirm recipe 0 is a placeholder
            print(pair)
    print('{}{}{}'.format('Would you like to load or view a recipe? Enter L7 to load recipe 7.\n',
                          'Enter V16 to view recipe 16 first before having the option to load it.\n',
                          'Enter Q to quit.'))
    checkprompt=efc.input_with_TO(TOsec=30,display=False)
    if checkprompt:
        if checkprompt[0] not in ('v','V','l','L'):
            print('Exiting...')
            return
        try:
            recipenum=int(checkprompt[1:])
            # BUGFIX: was `recipenum > len(recipelist)`, which allowed recipenum == len
            # (uncaught IndexError below) and negative values (silently selected the
            # wrong recipe via negative indexing)
            if not (0 <= recipenum < len(recipelist)):
                raise Exception
        except:
            print('I\'m sorry, that\'s not a valid recipe number. Try again!')
            return False
        if checkprompt.lower().startswith('l'):
            print('Loading {} pulse!'.format(recipelist[recipenum]))
            cls.psloadwvfm(recipelist[recipenum])
            return
        elif checkprompt.lower().startswith('v'):
            print('Displaying {} pulse!'.format(recipelist[recipenum]))
            cls.psviewwvfm(recipelist[recipenum],WvGoal10HzHint=True);
            #add load option back in after fixing plot blocking...
            return
        else:
            print('Exiting menu...')
            return
    else:
        print('Call me back when you\'re ready to order! :D')#handles initial prompt timeout
@classmethod
def pspreshot(cls,MBC_bypass=False):
    """
    Prepares and checks state of the laser for taking a single full-energy shot
    Because of checks/safeguards, this is the preferred function to use before taking a shot

    MBC_bypass=True skips the MBC reset/recursion path when the MBC reports unsafe
    Returns prechk (True/False) reflecting whether the MBC responsivity test passed,
    or False if the operator declines the YFEedge confirmation prompt
    """
    cls._pshostcheck()
    # make sure the front end is emitting and turned up before anything else
    if not YFE.OnCheck(display=False):
        print('WARNING: YFE seems to be off... Attempting to turn on YFE...')
        YFE.On(CtrlChk=False);
    if np.sum(YFE.Get(display=False)) < 550:
        if np.sum(YFE.Get(display=False)) < 20:
            print('WARNING: YFE seems to be turned down. Attempting to turn up YFE...')
            YFE.SetAll(True,displayQ=True)
    if MBC.IsSafe():
        # park the bias (MAN mode), then nudge it to verify the MBC is actually responding
        print('Turning off MBC bias dither...',end='',flush=True);
        currbias=GLOBAL.MBCbias.get();
        GLOBAL.MBCmode.put(1);#set bias mode to MAN
        efc.dotsleep(3);
        GLOBAL.MBCbias.put(currbias+10);
        print('Testing bias responsivity...',end='',flush=True);
        for ii in range(2):
            time.sleep(1);print('..',end='',flush=True);
        if np.abs(GLOBAL.MBCbias.get() - (currbias+10)) < 3:
            GLOBAL.MBCbias.put(currbias);
            efc.dotsleep(2);
            prechk = True
        else:
            print('*')
            print('WARNING: MBC not responding!!')
            prechk = False
    else:
        if not MBC_bypass:
            # reset the MBC and retry the whole preparation from scratch (recursive)
            print('MBC is not safe! Resetting the MBC...')
            MBC.Reset();##up above, check emission AND check currents???
            YFE.SetAll(True,displayQ=False)
            returnval=cls.pspreshot()
            return returnval
        else:
            prechk = True
    # YFEedge is a special diagnostic shape; require explicit confirmation before a full shot
    if GLOBAL.CurrShape.get().lower() == 'yfeedge':
        print('{}{}'.format(efc.cstr('WARNING: ','BRW,BBRR'),
              efc.cstr('last loaded pulse was YFEedge! Are you SURE you want to continue? [y/n]','BRW,BBRR')))
        resp=efc.getch()
        if resp.lower()[0] == 'y':
            print('I '+efc.cstr('REALLY','BRR')+' hope you know what you\'re doing...')
        else:
            print('Good choice. Please try again when you are ready.')
            return False
    # arm the EVR: event code 182 presumably selects single-shot -- TODO confirm against EVR config
    #if GLOBAL.EVRLPLLAMPEC.get() != 182:
    #GLOBAL.EVRLPLLAMPEN.put(0)
    GLOBAL.EVRLPLLAMPEC.put(182)
    #if GLOBAL.EVRLPLSSEC.get() != 182:
    #GLOBAL.EVRLPLSSEN.put(0)
    GLOBAL.EVRLPLSSEC.put(182)
    #if GLOBAL.EVRLPLLAMPEN.get() != 1:
    GLOBAL.EVRLPLLAMPEN.put(1)
    #if GLOBAL.EVRLPLSSEN.get() != 1:
    GLOBAL.EVRLPLSSEN.put(1)
    # summarize enabled heads, waveplate angles, and the resulting fraction of max energy
    wppvlist=[GLOBAL.HWPAB, GLOBAL.HWPEF, GLOBAL.HWPGH, GLOBAL.HWPIJ];
    headstr='';wpstr='';headlist=['AB','EF','GH','IJ'];
    for ii in range(4):
        if PFN.HeadENB()[ii]:
            headstr+=headlist[ii]
        wpstr=wpstr+headlist[ii]+': '+str(round(wppvlist[ii].get(),3))+', '
    # cos^2 of twice the half-waveplate angle gives the transmitted energy fraction per arm
    wpen=np.mean([PFN.HeadENB()[ii]*np.cos((np.pi/180)*2*wppvlist[ii].get())**2 for ii in range(4)])
    print('The following heads are enabled: '+headstr)
    print('The waveplate settings are: '+wpstr[:-2])
    print('This is ~'+str(round(100*wpen,3))+'% of max energy.')
    Psns=cls._Psns_get()#pickle.load(open(psfpQ+'Psns.p','rb'))
    SSs=cls._SSs_get()#pickle.load(open(psfpQ+'SSs.p','rb'))
    print('Current pulse target is: '+str(list(Psns))+' ns, '+str([list(SSs_sub) for SSs_sub in SSs])+' % of max power.')
    print('The most recently loaded pulse shape is: '+GLOBAL.CurrShape.get()+' (load/refresh stamp: {:015.8f})'.format(GLOBAL.CurrShapeLoadTime.get()))
    # survey the 9 PFN channels: enabled/disabled and (if enabled) charging state
    not_charging='';not_enabled='';yes_enabled='';headchanlist=['CD','A','B','E','F','G','H','I','J'];
    temppv1=[GLOBAL.PFNCDEN,GLOBAL.PFNAEN,GLOBAL.PFNBEN,GLOBAL.PFNEEN,GLOBAL.PFNFEN,GLOBAL.PFNGEN,GLOBAL.PFNHEN,GLOBAL.PFNIEN,GLOBAL.PFNJEN]
    temppv2=[GLOBAL.PFNCDCS,GLOBAL.PFNACS,GLOBAL.PFNBCS,GLOBAL.PFNECS,GLOBAL.PFNFCS,GLOBAL.PFNGCS,GLOBAL.PFNHCS,GLOBAL.PFNICS,GLOBAL.PFNJCS]
    for ii in range(9):
        if temppv1[ii].get() == 0:
            not_enabled+=headchanlist[ii]
        if temppv1[ii].get() == 1:
            yes_enabled+=headchanlist[ii]
        if (temppv1[ii].get() == 1) and (temppv2[ii].get() == 0):
            not_charging+=headchanlist[ii]
    if len(not_charging)>0:
        print('** WARNING: The following heads are enabled but NOT charging: '+not_charging)
    TTL_shutter.Toggle('open'+yes_enabled+'wwxx',display=False);time.sleep(1.5);#make sure all shutters are open...
    TTL_shutter.Toggle('close'+not_enabled,display=False);time.sleep(1.5);#close shutters that aren't enabled
    print('Current shutter status:')
    TTL_shutter.Status(display=True);
    efc.pickledump2(EMeters.EGall(return_energy_only=True), GLOBAL.PSFILEPATH+'preshot_energies.p')#used to check if energy meters update or not
    return prechk
    #waveform pre-check? verify shutters are open?
@classmethod
def pspostshot(cls,save_flag=True,display=False):
    """
    Executes post-shot routine for saving data and returning laser to appropriate state
    This is the preferred function to use after taking a shot (_psacqx() is inside)
    save_flag=True means that the shot data will be saved to the eLog of the current experiment (in addition to internal laser records)
    save_flag=False means that the shot data will be saved to internal laser records only, NOT to user eLog
    display=True means that the acquired shot's energy-weighted, combined 2in2w waveform will be plotted as power vs. time
    display=False means that no waveform plot will be generated upon execution of the function
    """
    # disarm the flashlamp and single-shot EVR triggers before anything else
    GLOBAL.EVRLPLLAMPEN.put(0)
    GLOBAL.EVRLPLSSEN.put(0)
    cls._pshostcheck()
    # acquire and (optionally) post the shot data
    cls._psacqx(save_flag=save_flag,display=display)#took out _noLecroyA
    #cls._psefc();
    TTL_shutter.Toggle('openall',display=False);#make sure all shutters are open again...
    # restore the front end to its idle state: bias tracking, eDrive currents, synthetic energies
    print('Resetting bias tracking...')
    MBC.Reset();
    YFE.SetAll(True);
    EMeters.E_synth_refresh();
@classmethod
def SHG_opt(cls,armsQ='ABEFGHIJ'):#check for trace height;#All shutters must start in the open state...
    """
    Optimizes the tuning of the doubler angles to maximize the conversion efficiency of the arms of the LPL
    (SHG_opt is useful especially if ambient conditions change; sometimes needs to be run fairly regularly)
    Prompts included to help insure system is in appropriate state for optimization
    Entire function should take only a couple of minutes; live display allows you to monitor progress
    If new value is too close to edge of window, it is recommended to optimize that arm again
    (If the doublers are way far off (no detection), tune the Newport motor back until it is closer)
    If leaving armsQ blank: all arms are optimized in the order of AB, EF, GH, IJ
    :EXAMPLE: SHG_opt() is equivalent to SHG_opt('ABEFGHIJ') and optimizes all arms
    Instead, one may choose specific arms to optimize
    :EXAMPLE: SHG_opt(armsQ='ABIJ') or SHG_opt('ABIJ') optimizes only arms AB and IJ
    :EXAMPLE: SHG_opt(armsQ='EF') or SHG_opt('EF') optimizes only arm EF
    """
    # --- pre-flight sanity checks (warn only; operator confirms below) ---
    print('Running this routine requires ALL TTL shutters to begin in the open state! The YFE must be on with the bias dither initially enabled!')
    if np.sum(TTL_shutter.Status(display=False)[-1]) > 0:
        print('Warning! The shutters don\'t all appear to be open! ',end='',flush=True);TTL_shutter.Status(display=True);
    else:
        print('(Shutters seem OK...)')
    if not YFE.OnCheck(display=False):
        print('Warning! The YFE doesn\'t appear to be on! ',end='',flush=True);YFE.OnCheck(display=True);
    else:
        print('(YFE emission seems OK...)')
    if np.sum(YFE.Get(display=False)) < 550:
        print('Warning! The YFE doesn\'t appear to be turned up! ');YFE.Get(display=True);
    else:
        print('(YFE current seems OK...)')
    if MBC.ModeCheck() != 0:
        print('(Warning! The MBC doesn\'t appear to be in AUTO mode!)')
    else:
        print('(MBC mode seems OK...)')
    print('Are you sure you are ready to proceed? [enter y/n]',end='',flush=True)
    checkprompt=efc.getch_with_TO(TOsec=10,display=False);
    if checkprompt not in ('y','Y'):
        print('Try again later then!');
        return
    else:
        print('OK, I hope you know what you\'re doing!')
    # --- put the system into the optimization state ---
    HWP.On('all',set_T=1)
    armlist=['AB','EF','GH','IJ']
    #YFEoff();YFEon();
    GLOBAL.MBCmode.put(1)#set MAN mode on MBC
    if np.sum(YFE.Get(display=False)) < 100:
        print('Check YFE before optimizing!')
    # load the dedicated optimization waveform; save the current one so it can be restored at the end
    optwvfm=pickle.load(open(GLOBAL.PSFILEPATH+'opttrace.p','rb'));
    try:
        oldwvfm=HAWG().ReadPulseHeights();
        HAWG().WritePulseHeights(optwvfm);
    except:
        print('Failed! HAWG')
    SHGpvlist=[GLOBAL.SHGABmot, GLOBAL.SHGEFmot, GLOBAL.SHGGHmot, GLOBAL.SHGIJmot];
    print('Closing all shutters...')
    TTL_shutter.Toggle('closeall',display=False);#close all the shutters
    time.sleep(4)
    GLOBAL.EVRLPLSSEC.put(43);GLOBAL.EVRLPLSSEN.put(1);#enable these...
    # verify the scope trace is live: two identical consecutive reads suggests a frozen scope
    try:
        tempchk1=LOSC('a').rch(1);time.sleep(.15);tempchk2=LOSC('a').rch(1);
        if np.sum(np.abs(tempchk1-tempchk2))<1e-6:
            print('Warning: scope trace doesn\'t appear to be updating, please check scope! Abort? [enter y/n]')
            checkprompt=efc.getch_with_TO(TOsec=10,display=False);
            if checkprompt not in ('n','N'):
                print('Try again later then!');
                HAWG().WritePulseHeights(oldwvfm);
                return
            else:
                print('OK, I hope you know what you\'re doing!')
    except:
        print('Scope error, check scope status! Aborting...')
        HAWG().WritePulseHeights(oldwvfm);
        return
    # --- scan setup: 11-point sweep around each arm's current position ---
    startposlist=[SHGrbv.get() for SHGrbv in SHGpvlist];
    newposlist=startposlist[:]
    alphlist=[1,0.5,0.5,1];  # per-arm scan-range scale factors
    for ii in range(4):
        if armlist[ii] in armsQ:#only prep the stage if it's going to be used
            SHGpvlist[ii].put(startposlist[ii]+alphlist[ii]*(-.1+.01*0))
    currentshutter=0;#trying to re-open a shutter in case of failure...
    # NOTE(review): 0 doubles as "no shutter open" sentinel AND the index of arm AB,
    # so a failure while AB's shutter is open will not re-open it -- confirm intent
    #set up all the plotting stuff
    plt.ion()
    fig,axs=plt.subplots(2,2,gridspec_kw={'hspace':0.4,'wspace':0.3})
    xdat=[[startposlist[ii]+alphlist[ii]*(-.1+.02*(jj)) for jj in range(11)] for ii in range(4)]
    ydat=[[0]*11 for ii in range(4)]
    ax1,=axs[0,0].plot(xdat[0],ydat[0]); axs[0,0].set_xlabel('AB'); plt.pause(0.01);
    ax2,=axs[0,1].plot(xdat[1],ydat[1]); axs[0,1].set_xlabel('EF'); plt.pause(0.01);
    ax3,=axs[1,0].plot(xdat[2],ydat[2]); axs[1,0].set_xlabel('GH'); plt.pause(0.01);
    ax4,=axs[1,1].plot(xdat[3],ydat[3]); axs[1,1].set_xlabel('IJ'); plt.pause(0.01);
    axss=[ax1,ax2,ax3,ax4]
    try:
        SLA=LOSC('A');SLA._Open();#changed to LecroyA since repair
        for ii in range(4):
            if armlist[ii] in armsQ:
                print('Begin optimizing '+armlist[ii]+'... ',end='',flush=True);
                shgarmdatax,shgarmdatay=[],[]
                TTL_shutter.Toggle('open'+armlist[ii],display=False);currentshutter=ii;time.sleep(4);print('Shutter opened!');#open one shutter
                # sweep the doubler motor and record peak 2w signal at each step
                for jj in range(11):
                    print('.',end='',flush=True)
                    SHGpvlist[ii].put(startposlist[ii]+alphlist[ii]*(-.1+.02*(jj)));time.sleep(2.5);#step to new position
                    curr_x=SHGpvlist[ii].get();curr_y=np.max(SLA._rch(3));time.sleep(.15);#in testing, max is more stable than sum
                    if curr_y > 0.005:#threshold so don't skew fit with noise; max is ~~10x this
                        shgarmdatax.append(curr_x);shgarmdatay.append(curr_y);#save x and y
                        print('.',end='',flush=True)
                    ydat[ii][jj]=curr_y
                    axss[ii].set_data(xdat[ii],ydat[ii])
                    axs[ii//2,ii%2].set_ylim((min(ydat[ii]),max(ydat[ii])))
                    plt.pause(0.01)
                    #axs[ii//2,ii%2].autoscale(True,True,True)
                    fig.canvas.draw_idle()
                    plt.pause(0.01)
                print('*')
                # quadratic fit: vertex of y = a*x^2 + b*x + c is at -b/(2a)
                qfit=np.polyfit(shgarmdatax,shgarmdatay,2);newpos=qfit[1]/(-2*qfit[0]);#find fit and new max
                if np.abs(startposlist[ii]-newpos)<.15:
                    SHGpvlist[ii].put(newpos);newposlist[ii]=newpos;
                    print('SHG position on arm '+armlist[ii]+' changed from '+str(round(startposlist[ii],4))+' to '+str(round(newpos,4)))
                else:
                    # reject fits whose optimum lands outside a plausible window; restore old position
                    print('Failed! New SHG position on arm '+armlist[ii]+' seems too far off... '+str(round(newpos,4))+' from '+str(round(startposlist[ii],4))+'... Restoring...')
                    SHGpvlist[ii].put(startposlist[ii])
                TTL_shutter.Toggle('close'+armlist[ii],display=False);currentshutter=0;#close that shutter;
                #xpq=np.arange(startposlist[ii]+alphlist[ii]*(-.1+.02*(-1)),startposlist[ii]+alphlist[ii]*(-.1+.02*(11)),.0001);
                qfitp=np.poly1d(qfit);
                axs[ii//2,ii%2].plot(xdat[ii],qfitp(xdat[ii]))
                axs[ii//2,ii%2].relim();plt.pause(0.01);
                axs[ii//2,ii%2].autoscale(True,True,True)
                fig.canvas.draw_idle()
                plt.pause(0.01)
                #epllxy([[shgarmdatax,shgarmdatay],[xpq,qfitp(xpq)]],xlb=armlist[ii])
            else:
                print('Skipping '+armlist[ii]+'...')
                pass
        SLA._Close();time.sleep(.15);#changed to LeCroyA
    except:
        # on any failure (incl. KeyboardInterrupt): restore all motor positions and
        # try to re-open the shutter that was open when the failure happened
        print('Failed! Restoring original values and attempting to re-open most-recent shutter... you should verify!')
        SLA._Close();time.sleep(.15);#changed to LeCroyA
        if currentshutter > 0:
            TTL_shutter.Toggle('open'+armlist[currentshutter],display=False);
        for ii in range(4):
            SHGpvlist[ii].put(startposlist[ii]);newposlist[ii]=startposlist[ii];
    # --- restore the system and log the result ---
    time.sleep(2);#need time so that last shutter trigger ends before trying to open IJ
    try:
        HAWG().WritePulseHeights(oldwvfm);
    except:
        print('Error! Check waveform!')
    GLOBAL.EVRLPLSSEN.put(0);#disable PC before re-opening shutters
    datestamp=int(datetime.now().strftime('%Y%m%d%H%M%S'))
    SHGlog=pickle.load(open(GLOBAL.PSFILEPATH+'SHG_opt_log.p','rb'))
    SHGlog.append([datestamp,[newposlist[ii] for ii in range(4)]])
    efc.pickledump2(SHGlog,GLOBAL.PSFILEPATH+'SHG_opt_log.p')
    TTL_shutter.Toggle('openall',display=False);#open all the shutters
    MBC.Reset();YFE.SetAll(True);#reset bias...
    plt.ioff()
def On():
    """Turn on the LPL front-end laser system (shortcut for the YFE.On() command)."""
    return YFE.On()
def Off():
    """Turn off the LPL front-end laser system (shortcut for the YFE.Off() command)."""
    return YFE.Off()
class ep:
    """
    Easy Plotting class for convenient shorthand ways of plotting data; nothing special, just lazy!
    Typical usage via ep.[command]
    Possible commands include:
    :l          #quick plot of single array of y values
    :lxy        #quick plot of single array of y values with x values also specified
    :lxyloglog  #quick xy plot with log x- and y-axes
    :lsav       #save plot of single array of y values
    :lxysav     #save plot of single array of y values with x values also specified
    :lcomp      #quick plot meant to compare diode waveform to pulse goal
    :lcsv       #quick plot of single array from csv file
    :llcsv      #quick plot of list of arrays from csv file
    :rcsv       #read in array from csv file
    :ll         #quick plot of list of arrays of y values
    :llxy       #quick plot of list of arrays of y values with x values also specified
    :llxyloglog #quick xy plot of several lists with log x- and y-axes
    :llt        #plot list of arrays of values according to a time mapping
    :llcomp     #plot list of diode waveforms along with target waveform
    :lfft       #quick plotting of FFT of provided waveform
    potential future work
    - consider consolidating some functions into one
    """
    @staticmethod
    def _decorate(xlb='none',ylb='none',xlim='none',ylim='none'):
        """Apply the optional axis labels and limits to the current pyplot axes.

        The string 'none' is the sentinel used throughout this class for "leave unset";
        invalid xlim/ylim values are silently ignored (matches historical behavior).
        """
        if xlb != 'none':
            plt.xlabel(xlb)
        if ylb != 'none':
            plt.ylabel(ylb)
        if xlim != 'none':
            try:
                plt.xlim(xlim)
            except:
                pass
        if ylim != 'none':
            try:
                plt.ylim(ylim)
            except:
                pass

    def l(listq,xlb='none',ylb='none',xlim='none',ylim='none'):
        """
        Shorthand plotting function for a single list of y-values
        Example: ep.l([1,2,4]) generates a plot of [1,2,4]
        Optional: Set the labels for the x-axis and y-axis with xlb and ylb, respectively
        Optional: Set the limits for the x-axis and y-axis with xlim and ylim, respectively
        """
        df1=plt.figure()
        plt.plot(listq);
        ep._decorate(xlb,ylb,xlim,ylim)
        df1.show()
        return

    def lxy(listxq,listyq,xlb='none',ylb='none',xlim='none',ylim='none'):
        """
        Shorthand plotting function for a single list of x-values and y-values
        Be sure to include the list of (equal-length) x-values and y-values separately
        Example: ep.lxy([10,20,30],[1,5,18]) generates a plot of points (10,1), (20,5), and (30,18)
        Optional: Set the labels for the x-axis and y-axis with xlb and ylb, respectively
        Optional: Set the limits for the x-axis and y-axis with xlim and ylim, respectively
        """
        df1=plt.figure()
        plt.plot(listxq,listyq);
        ep._decorate(xlb,ylb,xlim,ylim)
        df1.show()
        return

    def lxyloglog(listxq,listyq,xlb='none',ylb='none',xlim='none',ylim='none'):
        """
        Shorthand plotting function for a single list of x-values and y-values with logarithmic axes in both directions
        Be sure to include the list of (equal-length) x-values and y-values separately
        Example: ep.lxyloglog([10,100,1000],[.3,.05,.001]) generates a log-log plot of points (10,.3), (100,.05), and (1000,.001)
        Optional: Set the labels for the x-axis and y-axis with xlb and ylb, respectively
        Optional: Set the limits for the x-axis and y-axis with xlim and ylim, respectively
        """
        df1=plt.figure()
        plt.loglog(listxq,listyq);
        ep._decorate(xlb,ylb,xlim,ylim)
        df1.show()
        return

    def lsav(listq,FileNameQ,blockdisplay=True):
        """
        Shorthand function for saving a plot of a single list of y-values as a .png file
        Example: ep.lsav([1,2,4,8],'/reg/neh/operator/mecopr/my_plot') saves the plot of [1,2,4,8]
                 to the file name and path '/reg/neh/operator/mecopr/my_plot'
                 (there is no need to include the file extension -- it is added inside the code)
        Optional: blockdisplay=True closes the figure after creating and saving;
                  blockdisplay=False means the figure will be saved AND displayed on the screen
        """
        df1=plt.figure()
        plt.plot(listq);
        df1.savefig(str(FileNameQ+'.png'))
        if blockdisplay:
            plt.close(df1)
        return

    def lxysav(listxq,listyq,FileNameQ,abs_path=False,xlb='none',ylb='none',xlim='none',ylim='none'):
        """
        Shorthand function for saving a plot of a single list of x-values and y-values
        Be sure to include the list of (equal-length) x-values and y-values separately
        Example: ep.lxysav([10,20,30],[1,5,18],'/reg/neh/operator/mecopr/my_plot',abs_path=True)
                 saves the plot of points (10,1), (20,5), and (30,18) to the file name and
                 path '/reg/neh/operator/mecopr/my_plot'
                 (there is no need to include the file extension -- it is added inside the code)
        Optional: Set abs_path=False to save the plot under GLOBAL.PSFILEPATH;
                  Set abs_path=True to specify the absolute path where you will save your plot
        Optional: Set the labels for the x-axis and y-axis with xlb and ylb, respectively
        Optional: Set the limits for the x-axis and y-axis with xlim and ylim, respectively
        """
        df1=plt.figure()
        plt.plot(listxq,listyq);
        if abs_path:
            # BUGFIX: the '.png' extension was previously omitted on this branch,
            # contradicting the documented contract that it is added inside the code
            figfilename=str(FileNameQ+'.png');
        else:
            figfilename=str(GLOBAL.PSFILEPATH+FileNameQ+'.png')
        ep._decorate(xlb,ylb,xlim,ylim)
        df1.savefig(figfilename)
        plt.close(df1)
        return

    def lcomp(listq,goalq,Map,tMax):
        """
        Shorthand function for comparing a pulse shape to its targeted pulse shape; not used much currently
        Map and tMax are the parameters used for formatting the data in listq according to the desired new x- and y-range
        goalq is the waveform to which the formatted listq will be compared
        """
        formtra=[]
        formtra.append(LPL._TraceFormatting(listq,Map,tMax, AvgRange=1, FWHM=1))
        formtra.append(goalq)
        ep.ll(formtra)
        return

    @classmethod
    def lcsv(cls,CSVname):
        """
        Shorthand plotting function for a single list of y-values loaded from a CSV file; hasn't been used for ages
        File should be located within the data folder along GLOBAL.PSFILEPATH, i.e. '/reg/neh/operator/mecopr/mecpython/pulseshaping/data'
        """
        with open(GLOBAL.PSFILEPATH+'data/'+CSVname+'.csv','r') as filehead:
            RawListQ=filehead.read()
            ListedValues=RawListQ.split('\n')
        cls.l(ListedValues[:-1])  # drop the trailing empty element after the final newline
        return

    @classmethod
    def llcsv(cls,CSVHeadname):
        """
        Shorthand plotting function for a nested list of four sets of y-values loaded from a CSV file; hasn't been used for ages
        File should be located within the data folder along GLOBAL.PSFILEPATH, i.e. '/reg/neh/operator/mecopr/mecpython/pulseshaping/data'
        Channels formatted to match output of LeCroy schall function, but this also isn't used much anymore at all
        Only the header of the name needs to be specified, i.e. not counting _ch1, etc.
        """
        ListofListedValues=[]
        for ii in range(1,5):
            with open(GLOBAL.PSFILEPATH+'data/'+CSVHeadname+'_ch'+str(ii)+'.csv','r') as filehead:
                RawListQ=filehead.read()
                ListedValues=RawListQ.split('\n')
            ListofListedValues.append(ListedValues[:-1])
        cls.ll(ListofListedValues)
        return

    def rcsv(CSVname):
        """
        Shorthand function for reading a list of y-values from a CSV file; hasn't been used for ages
        File should be located within the data folder along GLOBAL.PSFILEPATH, i.e. '/reg/neh/operator/mecopr/mecpython/pulseshaping/data'
        Returns the values read out from the CSV file as a list of strings
        (or None if the file's delimiter could not be recognized)
        """
        with open(GLOBAL.PSFILEPATH+'data/'+CSVname+'.csv','r') as filehead:
            RawListQ=filehead.read()
        # BUGFIX: test '\r\n' BEFORE '\n' -- the old order made the CRLF branch
        # unreachable, since any CRLF file also contains '\n'
        if '\r\n' in RawListQ:
            ListedValues=RawListQ.split('\r\n')
        elif '\n' in RawListQ:
            ListedValues=RawListQ.split('\n')
        elif ',' in RawListQ:
            ListedValues=RawListQ.split(',')
        else:
            print('Unrecognized format on input file.')
            # BUGFIX: previously fell through to `return ListedValues` with the name
            # unbound, raising UnboundLocalError; return None explicitly instead
            return None
        return ListedValues

    def ll(llist,xlb='none',ylb='none',xlim='none',ylim='none'):
        """
        Shorthand plotting function for a list of multiple lists of y-values
        Example: ep.ll([[1,2,4],[2,7,5],[8,4,2]]) plots three traces over the top of each other:
                 [1,2,4] in one trace, [2,7,5] in one trace, and [8,4,2] in one trace
        """
        df1=plt.figure()
        for ii in range(len(llist)):
            plt.plot(llist[ii]);
        ep._decorate(xlb,ylb,xlim,ylim)
        df1.show()

    def llxy(llistxyq,xlb='none',ylb='none',xlim='none',ylim='none'):
        """
        Shorthand plotting function for a list of multiple lists of x-values and y-values
        Example: ep.llxy([[[1,2,3],[1,2,4]],[[1,2,3],[2,7,5]],[[.9,1.7,2.8],[8,4,2]]]) plots three traces over the top of each other:
                 points (1,1), (2,2), and (3,4) in one trace, points (1,2), (2,7), and (3,5) in one trace, and
                 points (0.9,8), (1.7,4), and (2.8,2) in one trace
        Optional: Set the labels for the x-axis and y-axis with xlb and ylb, respectively
        Optional: Set the limits for the x-axis and y-axis with xlim and ylim, respectively
        """
        df1=plt.figure()
        for ii in range(len(llistxyq)):
            plt.plot(llistxyq[ii][0],llistxyq[ii][1]);
        ep._decorate(xlb,ylb,xlim,ylim)
        df1.show()
        return

    def llxyloglog(llistxyq,xlb='none',ylb='none',xlim='none',ylim='none'):
        """
        Shorthand plotting function for a list of multiple lists of x-values and y-values with logarithmic axes in both directions
        Example: ep.llxyloglog([[[10,100,1000],[1,20,4]],[[10,100,1000],[.2,70,5]],[[9,170,2800],[80,4,.02]]]) plots three traces over the top of each other:
                 points (10,1), (100,20), and (1000,4) in one trace, points (10,0.2), (100,70), and (1000,5) in one trace, and
                 points (9,80), (170,4), and (2800,0.02) in one trace
        Optional: Set the labels for the x-axis and y-axis with xlb and ylb, respectively
        Optional: Set the limits for the x-axis and y-axis with xlim and ylim, respectively
        """
        df1=plt.figure()
        for ii in range(len(llistxyq)):
            plt.loglog(llistxyq[ii][0],llistxyq[ii][1]);
        ep._decorate(xlb,ylb,xlim,ylim)
        df1.show()
        return

    @classmethod
    def llt(cls,listq,Map):
        """
        Shorthand function for plotting a list of lists projected according to a map; not used much currently
        """
        formtra=[]
        for ii in range(len(listq)):
            formtra.append(LPL._TraceFormatting(listq[ii],Map[ii],1, AvgRange=1, FWHM=1))
        cls.ll(formtra)
        return

    @classmethod
    def llcomp(cls,listq,goalq,Map,tMax):
        """Shorthand function for comparing a list of pulse shapes to a targeted pulse shape; not used much currently"""
        formtra=[]
        formtra.append(LPL._TraceFormatting(listq[-1],Map,tMax, AvgRange=1, FWHM=1))
        formtra.append(goalq)
        cls.ll(formtra)
        return

    @classmethod
    def lfft(cls,errlistQ,time_stepQ):
        """
        Shorthand function for loglog-plotting the normalized power spectrum of the Fourier transform of a waveform,
        given the temporal spacing of the waveform using time_stepQ in seconds
        Example: for a waveform of energy data sampled every 1min, errlistQ would be the data and time_stepQ would be 60
        Example: for a simulated laser field with point spacing of 0.1fs, errlistQ would be the laser field and time_stepQ would be 1e-16
        Returns [sorted_frequencies, normalized_power_spectrum]
        """
        freqs1=np.fft.fftfreq(np.array(errlistQ).size, time_stepQ)
        idx1=np.argsort(freqs1)  # fftfreq returns frequencies out of order; sort for plotting
        fftd1=np.fft.fft(errlistQ)
        ps1=np.abs(fftd1/max(fftd1))**2  # normalize to the (numpy-ordered) max complex coefficient
        cls.lxyloglog(freqs1[idx1],ps1[idx1])
        return [freqs1[idx1],ps1[idx1]]
class efc:
"""
Extra Function Class for convenient shorthand ways of doing common things like printing in color, accepting keyboard input, interfacing with PVs, etc.
Typical usage via efc.[command]
Possible commands include:
:reloadchk #checks the version of the code being used
:reloadpkg #reloads the meclas package from file, in case of version update
:cstr #generates string with color text/background options
:cprint #prints strings with color text/background options
:getch #prompts user for a single input character
:getch_with_TO #getch but with TimeOut
:input_with_TO #input but with TimeOut
:pickledump2 #shortcut for saving objects to file
:pickleload2 #shortcut for reloading objects from file
:dotsleep #shortcut for printing dots while waiting
:ssleep #shortcut for standard socket waiting time
:rPV #shortcut for reading values from PVs
:wPV #shortcut for writing values to PVs
potential future work
- consider consolidating some functions into one
- e.g. getch with and without TimeOut?
- add EZeLog function
- take care of all the hassle of posting to the LCLS eLog from Python so people can do so very easily
- add threading helper functions?
"""
def reloadchk():
    """
    Print the version stamp of the currently-loaded code (a quick sanity check)
    When making code edits, the author typically writes the date here plus maybe a unique and helpful message
    """
    stamp='Last stamped: 20230313a'
    print(stamp)
def reloadpkg():
    """
    Reload the meclas package in the running IPython session via the %run magic
    Example: after saving edits to the file, call reloadpkg() to pick up the latest
    changes without exiting and restarting hutch python
    """
    from IPython import get_ipython
    shell = get_ipython()
    shell.magic("run /reg/g/pcds/pyps/apps/hutch-python/mec/mec/macros/meclas.py")
@staticmethod
def _tc():
"""An internal list of color codes used for color printing to terminal, used by cprint(), based on ANSI escape codes"""
#future add: RGB 0-255 foreground: "\033[38;2;" + Rch + ";" + Gch + ";" + Bch + "m"
#future add: RGB 0-255 background: "\033[48;2;" + Rch + ";" + Gch + ";" + Bch + "m"
colors=['ENDC','BLINK','K','R','G','Y','B','M','C','W','BK','BR','BG','BY','BB','BM','BC','BW','BRK','BRR','BRG','BRY','BRB','BRM','BRC','BRW','BBRK','BBRR','BBRG','BBRY','BBRB','BBRM','BBRC','BBRW']#B- for background, BR+ for
colorcodes=['\033['+str(ii)+'m' for ii in [0,5,30,31,32,33,34,35,36,37,40,41,42,43,44,45,46,47,90,91,92,93,94,95,96,97,100,101,102,103,104,105,106,107]];
return dict(zip(colors,colorcodes))
@classmethod
def cprint(cls,strQ,paramsQ):#delim with commas
"""
Prints to terminal using provided parameters for color, etc.
Param color choices are 'K','R','G','Y','B','M','C','W'; also available is 'BLINK'
:Colors above correspond to blacK, Red, Green, Yellow, Blue, Magenta, Cyan, White
:Add a 'BR' before each color to specify it to the BRight
:Add a 'B' on the very front to specify the color as Background
:Example: 'W' is White (text)
:Example: 'BK' is Background blacK
:Example: 'BRR' is BRight Red (text)
:Example: 'BBRB' is Background BRight Blue
Multiple parameters can be delimited using commas (order does not matter)
:Example: 'BLINK,BRY,BB' is BLINKing BRight Yellow text on Background Blue
Example: cprint('Warning!','BLINK,BRW,BBRR') prints 'Warning!' in blinking BRight White text on Background BRight Red
Alternatively, color strings can be created using cstr() and passed straight to the standard print() function
"""
prargs=''
if len(paramsQ) == 0:
paramsQ='ENDC'
for eaarg in paramsQ.split(','):
prargs+=cls._tc()[eaarg.upper()]
print(f"{prargs}"+strQ+f"{cls._tc()['ENDC']}")
return
@classmethod
def cstr(cls,strQ,paramsQ):
"""
Prepares a string (i.e. for later printing to terminal) using provided parameters for color, etc.
Param color choices are 'K','R','G','Y','B','M','C','W'; also available is 'BLINK'
:Colors above correspond to blacK, Red, Green, Yellow, Blue, Magenta, Cyan, White
:Add a 'BR' before each color to specify it to the BRight
:Add a 'B' on the very front to specify the color as Background
:Example: 'W' is White (text)
:Example: 'BK' is Background blacK
:Example: 'BRR' is BRight Red (text)
:Example: 'BBRB' is Background BRight Blue
Multiple parameters can be delimited using commas (order does not matter)
:Example: 'BLINK,BRY,BB' is BLINKing BRight Yellow text on Background Blue
Example: cstr('Warning!','BLINK,BBRY,K') creates a string 'Warning!' that, when printed to terminal
using print(), appears in blinking blacK text on Background BRight Yellow
Note: print(cstr('abc','R,BBRB')) is equivalent to cprint('abc','R,BBRB'), so cstr() is mostly useful
when auto-generating colored strings to be printed later
Note: if concaenating colored strings with normal strings, the normal strings will not be affected
Note: concatenating multiple color strings is necessary for creating combined strings with varying coloration
"""
prargs=''
if len(paramsQ) == 0:
paramsQ='ENDC'
for eaarg in paramsQ.split(','):
prargs+=cls._tc()[eaarg.upper()]
return f"{prargs}"+str(strQ)+f"{cls._tc()['ENDC']}"
@staticmethod
def _keybd():
""""Prepares some keyboard input interpretation parameters needed for interpreting certain keystrokes returned by getch()"""
return dict(zip(['key_Enter','key_Esc','key_Up','key_Dn','key_Rt','key_Lt'],[13,27,'\033[A','\033[B','\033[C','\033[D']))
    def getch():
        """
        Similar to input() but takes only a single character and doesn't require hitting Enter
        Puts the terminal into raw mode, reads up to 4 bytes (one keystroke, including
        multi-byte escape sequences such as the arrow keys), then restores the terminal settings
        Returns:
        :a 1-character string for ordinary printable keys
        :an int (the ord() value) for single non-printable keys such as Enter (13) or Esc (27)
        :an escape-sequence string for special keys; compare against the entries of _keybd()
        Example: my_char = getch() will cause the terminal to wait for keyboard input and then record
        the first keypress into my_char and then return to terminal or the next line in a function/script
        """
        fdInput = sys.stdin.fileno()
        termAttr = termios.tcgetattr(0);#fdInput);#0) test to fix print problems
        tty.setraw(fdInput)
        # read up to 4 bytes so a full escape sequence arrives in a single call
        ch = sys.stdin.buffer.raw.read(4).decode(sys.stdin.encoding)
        if len(ch) == 1:
            if ord(ch) < 32 or ord(ch) > 126:
                ch = ord(ch)#non-printable single keys are returned as ints (see _keybd())
        elif ord(ch[0]) == 27:
            ch = '\033' + ch[1:]#normalize escape sequences to match _keybd() entries
        termios.tcsetattr(fdInput, termios.TCSADRAIN, termAttr)#always restore terminal mode
        return ch
    def getch_with_TO(TOsec,display=True):
        """
        Same as getch() except with a useful timeout period so as to not block a terminal waiting for input
        Use TOsec to set the time-out window in seconds
        Use display=True if you want a message printed to terminal that lets the operator know the time-out duration
        Use display=False to avoid printing a message to terminal telling how many seconds one has to enter a character
        Example: my_char = getch_with_TO(5,display=False) will wait for the next keyboard input for five seconds
        : if a keystroke is not recorded within 5 seconds, function returns False
        : if a keystroke *is* recorded within 5 seconds, function returns the first key pressed
        """
        if display:
            print("You have {} seconds to answer! ".format(str(TOsec)),end='',flush=True)
        fdInput = sys.stdin.fileno()
        termAttr = termios.tcgetattr(0);#fdInput);#0) test to fix print problems
        tty.setraw(fdInput)
        # select() blocks until stdin becomes readable or TOsec elapses
        i, o, e = select.select( [sys.stdin], [], [], TOsec)
        if (i):
            ch = sys.stdin.buffer.raw.read(4).decode(sys.stdin.encoding)
            if len(ch) == 1:
                if ord(ch) < 32 or ord(ch) > 126:
                    ch = ord(ch)#non-printable single keys are returned as ints (see _keybd())
            elif ord(ch[0]) == 27:
                ch = '\033' + ch[1:]#normalize escape sequences to match _keybd() entries
            termios.tcsetattr(fdInput, termios.TCSADRAIN, termAttr)
            return ch
        else:
            print('Timed out!',end='\r\n',flush=False)
            termios.tcsetattr(fdInput, termios.TCSADRAIN, termAttr)#restore terminal mode before leaving
            return False
    def input_with_TO(TOsec, display=True):
        """
        Similar to input() but includes a user timeout so the window for input doesn't stay open forever and block the terminal
        Use TOsec to set the time-out window in seconds
        Use display=True if you want a message printed to terminal that lets the operator know the time-out duration
        Use display=False to avoid printing a message to terminal telling how many seconds one has to enter input
        Example: my_input = input_with_TO(30,display=False) will wait for keyboard input to be entered for thirty seconds
        : if input is not entered within 30 seconds, the function returns False
        : if input *is* entered within 30 seconds, the function returns the entered text
        """
        if display:
            print("You have {} second{} to answer! ".format(str(TOsec), '' if TOsec==1 else 's'),end='',flush=True)
        # select() waits until stdin is readable (a full line was entered) or TOsec elapses
        i, o, e = select.select( [sys.stdin], [], [], TOsec)
        if (i):
            return sys.stdin.readline().strip()
        else:
            print('Timed out!',end='\r\n',flush=False)
            return False
def pickledump2(objQ,fullFileNameQ):
"""
Shortcut for generating pickle files and setting file access permissions as liberally as possible
The first argument objQ is the python object you want to save to file
The second argument fullFileNameQ is the full file path and file name for your file
:note that for pickle files it is best to end the file with the '.p' file extension
Example: pickledump2(thanos_data, '/reg/neh/operator/mecopr/thanos_data.p') saves the contents of thanos_data
to the file '/reg/neh/operator/mecopr/thanos_data.p' and sets file access permissions liberally
"""
pickle.dump(objQ,open(fullFileNameQ,'wb'));
os.chmod(fullFileNameQ,stat.S_IRUSR|stat.S_IWUSR|stat.S_IRGRP|stat.S_IWGRP|stat.S_IROTH|stat.S_IWOTH);#
#os.chmod(fullFileNameQ,stat.S_IRWXU|stat.S_IRWXG|stat.S_IRWXO);#
return
def pickleload2(fullFileNameQ):
"""
Shortcut for loading pickle files without having to type the open() command
The only argument fullFileNameQ is the full file path and file name for your file
:note that pickle files tend to have the '.p' file extension
Example: thanos_data = pickleload2('/reg/neh/operator/mecopr/thanos_data.p') loads the contents of
the file '/reg/neh/operator/mecopr/thanos_data.p' into thanos_data
"""
return pickle.load(open(fullFileNameQ,'rb'));
def dotsleep(tSEC):
"""
Similar to time.sleep() but also prints a . character every second for the entire duration, finishing with a * character
Example: dotsleep(5) prints a '.' character to screen once per second for 5 seconds, at which point it prints a '*'
Note: printing all happens in-line rather than on separate lines
"""
for ii in range(tSEC):
print('.',end='',flush=True);time.sleep(1);
print('*')
return
def ssleep():
"""
A convenient shortcut for pausing 150ms for certain types of socket functions to wrap up
ssleep() is equivalent to time.sleep(0.15)
"""
time.sleep(0.15);
return
# =============================================================================
# @classmethod
# def pldemo(cls):
# """An old demo meant to demonstrate how Python could be used to tune a laser based on camera feedback and EPICS actuators"""
# plt.ion()
# fig,axs=plt.subplots(1,1)
# plt.show()
# xpos=85;ypos=65;xdel=20;ydel=20;
# Z=[[np.exp(-((ii-xpos)/10)**2-((jj-ypos)/7.5)**2) for ii in range(100)] for jj in range(100)]
# Zref=[[1 if np.exp(-((ii-50)/2)**2-((jj-50)/2)**2) > 0.5 else 0 for ii in range(100)] for jj in range(100)]
# ax1=axs.imshow(Z,origin='lower')
# axs.imshow(Zref,alpha=0.1,origin='lower')
# #ax1,=axs[0].plot(xdat,ydat)
# #ax2,=axs[1].plot(xdat,ydat)
# #ax4,=axs[1,1].plot(xdat,ydat)
# axss=[ax1]#[ax1,ax2,ax3,ax4]
# cont=True
# while cont:
# axss[0].set_data(Z)
# fig.canvas.draw_idle()
# plt.pause(0.025)
# qq=cls.getch()
# if qq==cls._keybd()['key_Dn']:
# ypos-=ydel
# elif qq==cls._keybd()['key_Up']:
# ypos+=ydel
# elif qq==cls._keybd()['key_Rt']:
# xpos+=xdel
# elif qq==cls._keybd()['key_Lt']:
# xpos-=xdel
# elif qq=='w':
# ydel=ydel*2
# elif qq=='s':
# ydel=ydel/2
# elif qq=='a':
# xdel=xdel/2
# elif qq=='d':
# xdel=xdel*2
# elif qq==cls._keybd()['key_Esc']:
# cont=False
# else:
# pass
# Z=[[np.exp(-((ii-xpos)/10)**2-((jj-ypos)/7.5)**2) for ii in range(100)] for jj in range(100)]
# print('['+str(xpos)+','+str(ypos)+']')
# plt.ioff()
# =============================================================================
def rPV(yourPV,display=True):
"""
Convenient short-hand way to read out and return the value from a PV
Input yourPV needs to be formatted as a string
If display=True then any readout failures are printed to terminal
If display=False then there is no printout (used mainly in report generation while checking many PVs)
"""
try:
temppv=EpicsSignal(yourPV);
tempval=temppv.get()
return tempval
except:
if display:
print('Read-out failed: {}!'.format(yourPV))
return False
def wPV(yourPV, yourVal, display=True):
"""
Convenient short-hand way to write a value to a PV
Input yourPV needs to be formatted as a string
Input yourVal needs to fit the type (e.g. string vs number, etc.) expected by yourPV
"""
try:
temppv=EpicsSignal(yourPV);
temppv.put(yourVal)
except:
if display:
print('Write failed:{} and {}!'.format(yourPV, yourVal))
return
# =============================================================================
# def Thread_Function(thrEvent, interfunc, args, timeout=50, loopmax=10):
# #definitely the wrong function, just forgot where the right one is
# loopiter=0
# t0=time.time()
# while thrEvent.is_set():
# interfunc(thrEvent, *args)
# loopiter+=1
# if (loopiter >= loopmax) or (t0 + timeout <= time.time()):
# thrEvent.clear()
#
# def Thread_Function3(thrEvent, interfunc, args, timeout=50, loopmax=10):
# loopiter=0
# t0=time.time()
# while thrEvent.is_set():
# interfunc(thrEvent, *args)
# loopiter+=1
# if (loopiter >= loopmax) or (t0 + timeout <= time.time()):
# thrEvent.clear()
#
# def interfunc1(thrEvent, nameQ, name2Q):
# uin=input()
# print('{} and {}, Here it is big: {}'.format(nameQ, name2Q,uin.upper()))
# if uin.lower() == 'q':
# thrEvent.clear()
# time.sleep(1)
#
# @classmethod
# def threaddemo(cls):
# running = threading.Event()
# running.set()
#
# thread = threading.Thread(target=cls.Thread_Function, args=(running,'t1'))
# thread2 = threading.Thread(target=cls.Thread_Function3, args=(running, cls.interfunc1, ('sam','dave'), 3, 200))
#
# thread.start()
# time.sleep(0.5)
# thread2.start()
# time.sleep(0.25)
#
# ppp=0def View(*CAMargs,ImageNo=2,LIVE=False,MAXLOOPS=10):
# if CAMargs == ('all',):
# CAMargs = ('Regen', 'Trap', 'StrInA', 'StrInB', 'MPA1In', 'MPA1Out', 'MPA2In', 'MPA2Out', 'MPA2Xtal', 'CompIn', 'CompOutNF', 'CompOutFF')
# if len(CAMargs) == 1:
# _QuickView(*CAMargs,LIVE=LIVE,MAXLOOPS=MAXLOOPS)
# return
# plt.ion()
# subply=len(CAMargs)//2 + len(CAMargs)%2;subplx=2;
# fig,axs=plt.subplots(subply,subplx,figsize=(5,2*subply));
# axss=[];tres1L=[];tPVheadL=[]
# for ii in range(len(CAMargs)):
# tidx=(ii//2,ii%2) if len(CAMargs) > 2 else (ii%2)
# axs[tidx].axes.xaxis.set_ticklabels([]);
# axs[tidx].axes.yaxis.set_ticklabels([]);
# axs[tidx].tick_params(direction='in');
# axs[tidx].set_ylabel(CAMargs[ii]);
# try:
# tPVhead=CAMname(CAMargs[ii])
# tPVheadL.append(tPVhead);
# tres1=rPV(tPVhead+':IMAGE'+str(ImageNo)+':ArraySize1_RBV')
# tres2=rPV(tPVhead+':IMAGE'+str(ImageNo)+':ArraySize0_RBV')
# tres3=rPV(tPVhead+':IMAGE'+str(ImageNo)+':ArraySize2_RBV')
# tresL=sorted([tres1,tres2,tres3],reverse=True)
# twf=rPV(tPVhead+':IMAGE'+str(ImageNo)+':ArrayData')
# if len(twf) != tresL[0]*tresL[1]:
# twf = list(twf) + (tresL[0]*tresL[1] - len(twf))*[0]
# tres1=tres3
# tempax=axs[tidx].imshow(np.array_split(np.array(twf),tres1));
# tres1L.append(tres1);
# axss.append(tempax)
# except:
# print('Error occured when plotting {}!'.format(CAMargs[ii]))
# if (len(CAMargs) > 2) and (len(CAMargs)%2 > 0):
# iit=len(CAMargs)
# tidx=(iit//2,iit%2)
# axs[tidx].axis('off');
# fig.tight_layout()
# plt.show();
# waittime=.01;
# plt.pause(waittime);
# time.sleep(waittime)
# loopcount=0
# if LIVE:
# while loopcount<MAXLOOPS:
# for ii in range(len(CAMargs)):
# tidx=(ii//2,ii%2) if len(CAMargs) > 2 else (ii%2)
# try:
# twf=rPV(tPVheadL[ii]+':IMAGE'+str(ImageNo)+':ArrayData')
# if tPVheadL[ii] == 'MEC:GIGE:29':
# tres1=rPV(tPVheadL[ii]+':IMAGE'+str(ImageNo)+':ArraySize1_RBV')
# tres2=rPV(tPVheadL[ii]+':IMAGE'+str(ImageNo)+':ArraySize0_RBV')
# tres3=rPV(tPVheadL[ii]+':IMAGE'+str(ImageNo)+':ArraySize2_RBV')
# tresL=sorted([tres1,tres2,tres3],reverse=True)
# if len(twf) != tresL[0]*tresL[1]:
# twf = list(twf) + (tresL[0]*tresL[1] - len(twf))*[0]
# tres1=tres3
# tres1L[ii]=tres1
# axss[ii].set_data(np.array_split(np.array(twf),tres1L[ii]));
# fig.canvas.draw_idle()
# plt.pause(waittime)
# time.sleep(waittime)
# except:
# print('Error occured when plotting {}!'.format(CAMargs[ii]))
# loopcount+=1
# while running.is_set():
# print('Waiting...')
# time.sleep(1)
# ppp+=1
# if ppp > 20:
# running.clear()
#
# print('Wait until Thread is terminating')
# thread.join()
# thread2.join()
# print("EXIT __main__")
# =============================================================================
# =============================================================================
# def reprintdemo():
# #does not work in IPython on RHEL machines
# for x in range(10):
# print('{:>5}'.format(x*10**int(5*np.random.rand())), end='\r');time.sleep(1);
# print()
# =============================================================================
class HAWG:
"""
Class containing all the necessary functions for running the Highland Arbitrary Waveform Generator for LPL pulse shaping
Unless speed is necessary, it is usually most appropriate to interface with the Highland simply by using HAWG().[command].
This will take care of all of the socket opening/closing by itself.
Example: read out the current Highland waveform using HAWG().ReadPulseHeights()
Example: reset the Highland using HAWG().Reset()
(Alternatively, save the initialized object via SH=HAWG() and then use SH.ReadPulseHeights(), etc.)
List of possible commands includes:
:ReadStatus
:ClearStatus
:ReadPulseHeights
:WritePulseHeights
:ReadFiducialImpulseSettings
:WriteFiducialImpulseSettings
:WriteEnableByte
:ReadT0Delay
:ReadWaveAmplitudeCalibrations
:WriteWaveAmplitudeCalibrations
:ReadWaveTimeCalibrations
:WriteWaveTimeCalibrations
:ReadMiscellaneousCalibrations
:WriteMiscellaneousCalibrations
:ReadWalkTable
:WriteWalkTable
:FidOn
:FidOff
Most of these functions are for expert use only, so please be careful with them!
Potential future work:
: FET surveys
: Highland internal parameter saving and restoring
: find fiducial bump
: other outstanding functions not yet programmed from the T400B manual
"""
def __init__(self):
"""Initializes the object; only one should be instantiated at a time"""
self._HighlandSocket=None
self._HIGHLAND_SLAVE_ADDRESS=0 #arbitrary, I think
    def _Open(self):
        """
        Takes care of opening the socket to the Highland; if called explicitly like this, it MUST be followed by a _Close()
        statement or else you'll block the socket and need to power cycle the unit using HAWG().Reset()
        Using this function allows one to leave the socket open, which allows for quicker access to Highland functions.
        :Example: after SH = HAWG() and SH._Open() then one may use functions inside a loop, e.g.
            for loops in range(50):
                curr_wvfm = SH._ReadPulseHeights()
                #calculate some change to the waveform based on feedback from diodes, etc.
                SH._WritePulseHeights(new_wvfm)
        reads and writes 50 Highland waveforms without opening and closing the socket in between each loop
        WARNING: DO NOT FORGET to close the socket at the end of your loop (etc.) using SH._Close()
        In general, there is an "underscored" version of most of the functions mentioned in the SH docstring that
        can be used in the way described above (e.g. _ReadPulseHeights, _GetStatus, etc.).
        """
        if not self._HighlandSocket:
            try:
                self._HighlandSocket=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self._HighlandSocket.settimeout(1.0)#1 s timeout so an unreachable unit fails fast
                self._HighlandSocket.connect((GLOBAL.HIGHLAND_IP, 2000))#'highland-mec-01'
                #Highland's IP address can be changed using the Lantronix DeviceInstaller
            except:
                print('HIGHLAND NOT CONNECTED')
        else:
            print('Socket may already be open!')
def _Close(self):
"""
Takes care of closing the socket to the Highland if first explicitly opened using _Open()
Use this after you have taken care of all of your business that you started with _Open()
Example: SH=HAWG()
SH._Open()
#a bunch of consecutive Highland calls using underscored commands like _ReadPulseHeights, _WritePulseHeights, etc.
SH._Close()
"""
try:
self._HighlandSocket.close()
self._HighlandSocket=None
except:
print('Unable to close socket -- it may already be closed')
return
    def Reset(self):
        """Power-cycles the Highland; this is necessary if the socket was broken
        Writes to the Highland's power PV: off, wait 3 s, on, then wait 10 s for the reboot"""
        print('Powering off Highland AWG, waiting 3sec...',end='',flush=True);
        GLOBAL.LPLHAWGpwr.put(2);#2 appears to mean "off" here -- confirm against the PV's enum states
        efc.dotsleep(3);
        print('Rebooting Highland AWG, waiting 10sec...',end='',flush=True)
        GLOBAL.LPLHAWGpwr.put(1);#1 appears to mean "on"
        efc.dotsleep(10);
@staticmethod
def _Hex1Byte(num):
"""Internal function for formatting a number as a single byte; used for communicating with the Highland"""
return '{0:02x}'.format(int(num)%(0xff+1))
@staticmethod
def _Hex2Byte(num):
"""Internal function for formatting a number as two bytes; used for communicating with the Highland"""
return '{0:04x}'.format(int(num)%(0xffff+1))
@staticmethod
def _ByteSum(Datastr):
"""Internal byte-wise addition used for the Highland checksum
accepts string in 'xx' format, e.g. '1e....'
input must be in format returned by hexlify(data)"""
bytesum=0
for byte in range(len(Datastr)//2):
bytesum+=int(Datastr[2*byte:2*byte+2],16)
return bytesum #returns an integer
@classmethod
def _PollConstructor(cls,COMMAND_CODE,POLL_LENGTH,SLAVE_ADDRESS,DATA):
"""Generic constructor for all polls to the internal Highland processor
enter COMMAND_CODE, POLL_LENGTH, SLAVE_ADDRESS as integer values
enter DATA as a string, e.g. 'ffff0000' or empty string '' for no data"""
ProtoCommand=''
ProtoCommand+='0B'#MSYN: all commands begin with this byte (0B)
ProtoCommand+=cls._Hex2Byte(POLL_LENGTH) #BC:BC: number of bytes in message
ProtoCommand+=cls._Hex2Byte(SLAVE_ADDRESS) #RA:RA: slave address
ProtoCommand+=cls._Hex1Byte(COMMAND_CODE) #CMD: command code
ProtoCommand+=DATA #<data> must already be formatted properly 'xxxx' or ''
BYTE_SUM=cls._ByteSum(ProtoCommand) #compute the sum
ProtoCommand+=cls._Hex2Byte(BYTE_SUM) #CKS:CKS: 16-bit sum of all preceding bytes
ProtoCommand+='17' #ETB: end of message byte, 17 hex
Command=unhexlify(ProtoCommand)
return Command
    @classmethod
    def _ReplyInterpreter(cls,REPLY_LENGTH,SLAVE_ADDRESS,REPLY_STRING):
        """Interpreter for replies from the internal Highland processor
        input REPLY_STRING already formatted using hexlify
        Returns (HStatus, HData, HError) where:
        :HStatus is the two-hex-digit status byte (decode with _StatusInterpreter)
        :HData is the hex string of the reply's <data> field ('' if the reply carries none)
        :HError is a 5-character string of '0'/'1' flags, one per framing check below ('0' = OK)
        """
        HError=''
        if int(REPLY_STRING[0:2],16)==int('1e',16): HError+='0'
        else: HError+='1' #wrong start-of-message byte, 1E hex
        if int(REPLY_STRING[2:6],16)==REPLY_LENGTH: HError+='0'
        else: HError+='1' #wrong reply length; should never happen, as we recv(expected #)
        if int(REPLY_STRING[6:10],16)==SLAVE_ADDRESS: HError+='0'
        else: HError+='1' #slave address not echoed
        HStatus=REPLY_STRING[10:12] #will return status as string, interpret later
        HData=REPLY_STRING[12:-6] #cuts off SSYN,BC:BC,RA:RA,STS and CKS:CKS,ETB bytes
        # leaves only the data string; leaves empty string '' for no data
        if cls._ByteSum(REPLY_STRING[:-6])==int(REPLY_STRING[-6:-2],16): HError+='0'
        else: HError+='1' #checksum error
        if int(REPLY_STRING[-2:],16)==int('17',16): HError+='0'
        else: HError+='1' #wrong end-of-message byte, 17 hex
        return HStatus, HData, HError
    @classmethod
    def _SendPollRecvReply(cls,MySocketQ,COMMAND_CODE,POLL_LENGTH,REPLY_LENGTH,SLAVE_ADDRESS,DATA):
        """Generic utility for sending a poll to the internal Highland processor and receiving its reply
        Builds the poll with _PollConstructor, sends it on MySocketQ, reads REPLY_LENGTH bytes,
        and returns the (HStatus, HData, HError) triple produced by _ReplyInterpreter"""
        MyPollQ=cls._PollConstructor(COMMAND_CODE,POLL_LENGTH,SLAVE_ADDRESS,DATA)
        MySocketQ.send(MyPollQ)
        MyRawReplyQ=MySocketQ.recv(REPLY_LENGTH)#NOTE(review): assumes the whole reply arrives in one recv() -- confirm for large replies
        HStatusQ, HDataQ, HErrorQ = cls._ReplyInterpreter(REPLY_LENGTH,SLAVE_ADDRESS,hexlify(MyRawReplyQ))
        return HStatusQ, HDataQ, HErrorQ
@staticmethod
def _StatusInterpreter(HError, HStatus, Quiet=True):
"""Interpreter for the part of the Highland processor response indicating internal status"""
if HError[0]=='1': print('WARNING: Wrong start-of-message byte received')
if HError[1]=='1': print('WARNING: Reply length discrepancy')
if HError[2]=='1': print('WARNING: Slave address not echoed')
if HError[3]=='1': print('WARNING: Checksum error')
if HError[4]=='1': print('WARNING: Wrong end-of-message byte received')
if not Quiet:
if int(HStatus,16)==0: print('STATUS: NORMAL')
else:
print('STATUS: ERROR FLAG(S) RECEIVED')
if ((int(HStatus,16))&(2**(8-1)))!=0: print('-trigger/bias timing error')
if ((int(HStatus,16))&(2**(8-3)))!=0: print('-backup RAM data/calibrations lost flag')
if ((int(HStatus,16))&(2**(8-4)))!=0: print('-powerfail/restart flag')
if ((int(HStatus,16))&(2**(8-7)))!=0: print('-trigger/bias timing error')
@classmethod
def _BaseFunction(cls,MySocketQ, SlaveAddress, CommandCode, PollLength, ReplyLength, MyData='',**kwargs):
"""Generic function used to construct the different individual commands to the Highland;
each individual command simply supplies its own unique code and data specific to that command's protocol"""
try:
HStatusQ, HDataQ, HErrorQ = cls._SendPollRecvReply(
MySocketQ,CommandCode,PollLength,ReplyLength,SlaveAddress,MyData)
cls._StatusInterpreter(HErrorQ, HStatusQ)
return HDataQ
except:
print('Failed!')
return False
def _FunctionWrapper(self,FuncQ,kwargs={}):
"""
A function wrapper that allows one to call each Highland command directly without having to worry about opening/closing sockets;
if issuing one-off commands that don't require high-frequency consistent execution, this is sufficient
In short, this automatically wraps an _Open() and _Close() statement around an "underscored" function (like _ReadPulseHeights)
in order to create a function (like ReadPulseHeights) that can be used without explicitly worrying about good socket habits
"""
try:
self._Open();time.sleep(0.15);
HDataQList=FuncQ(**kwargs);time.sleep(0.15);
self._Close();time.sleep(0.15);
except:
self._Close();time.sleep(0.15);
return HDataQList
    def _ReadStatus(self):
        """
        Bare function for the ReadStatus command; socket must be explicitly opened/closed
        Prints the decoded status fields and returns the raw hex data string
        ...
        From the T400B manual:
        POLL command 0 (with no <data> field) invokes a 'status data' reply from a slave.
        The reply message contains a data field having the following subfields...
        PROGRAM ID 8-byte ASCII firmware ID/revision field.
        UPTIME 4-byte uptime, as 32-bit value, in seconds;
        cleared at powerup time.
        ENABLE 1-byte field identifying subsystems which
        are enabled. See 'WRITE ENABLE
        COMMAND' below for bit assignments.
        """
        print('**READ STATUS**')
        HDataQ=self._BaseFunction(self._HighlandSocket,self._HIGHLAND_SLAVE_ADDRESS,CommandCode=0,PollLength=9, ReplyLength=22,MyData='')
        print('PROGRAM ID: '+unhexlify(HDataQ[:16]).decode())#first 8 bytes: ASCII firmware ID
        print('UPTIME: '+str(int(HDataQ[16:24],16))+' seconds')#next 4 bytes: seconds since powerup
        print('ENABLE:')#final byte: enabled-subsystem bits, decoded below
        if ((int(HDataQ[-2:],16))&(2**(0)))!=0:
            print('-CPU self-trigger, 960 Hz (test mode)')
        if ((int(HDataQ[-2:],16))&(2**(1)))!=0:
            print('-self-trigger, 20 kHz')
        if ((int(HDataQ[-2:],16))&(2**(2)))!=0:
            print('-external triggers')
        if ((int(HDataQ[-2:],16))&(2**(3)))!=0:
            print('-the BIAS generators')
        print('****')
        return HDataQ
def ReadStatus(self):
"""
Wrapped function for the ReadStatus command; socket is automatically opened/closed
...
From the T400B manual:
POLL command 0 (with no <data> field) invokes a 'status data' reply from a slave.
The reply message contains a data field having the following subfields...
PROGRAM ID 8-byte ASCII firmware ID/revision field.
UPTIME 4-byte uptime, as 32-bit value, in seconds;
cleared at powerup time.
ENABLE 1-byte field identifying subsystems which
are enabled. See 'WRITE ENABLE
COMMAND' below for bit assignments.
"""
HDataQ=self._FunctionWrapper(self._ReadStatus);
return HDataQ
    def _ClearStatus(self):
        """
        Bare function for the ClearStatus command; socket must be explicitly opened/closed
        Returns the reply's hex data string (empty for this command) or False on failure
        ...
        From the T400B manual:
        Execution of this command clears the slave STATUS byte. If any error conditions
        persist, status error bits may reappear immediately.
        """
        print('**CLEAR STATUS**')
        HDataQ=self._BaseFunction(self._HighlandSocket,self._HIGHLAND_SLAVE_ADDRESS,CommandCode=1,PollLength=9, ReplyLength=9,MyData='')
        print('****')
        return HDataQ
def ClearStatus(self):
"""
Wrapped function for the ClearStatus command; socket is automatically opened/closed
...
From the T400B manual:
Execution of this command clears the slave STATUS byte. If any error conditions
persist, status error bits may reappear immediately.
"""
HDataQ=self._FunctionWrapper(self._ClearStatus);
return HDataQ
    def _ReadPulseHeights(self,ShowPlot=False):
        """
        Bare function for the ReadPulseHeights command; socket must be explicitly opened/closed
        Returns the 140 programmed impulse heights as a list of ints (0-65535); ShowPlot=True also plots them
        ...
        From the T400B manual:
        Poll command 2 (no <data> field) reads the 140 programmable waveform impulse
        heights, each an unsigned 16-bit value (0 is zero height, 65535 is max). The slave
        thus returns a 280-byte <data> field. The first two <data> bytes are the programmed
        pulse height for the first 250 ps impulse segment, sent MS byte first.
        """
        HDataQ=self._BaseFunction(self._HighlandSocket,self._HIGHLAND_SLAVE_ADDRESS,CommandCode=2,PollLength=9, ReplyLength=289,MyData='')
        # reply data is 140 big-endian 16-bit values, i.e. 4 hex characters per entry
        HDataQList=[int(HDataQ[ii*4:4+ii*4],16) for ii in range(int(len(HDataQ)//4))]
        if ShowPlot:
            ep.l(HDataQList)#ep.l appears to be a plotting helper defined elsewhere in this file -- confirm
        return HDataQList
def ReadPulseHeights(self,ShowPlot=False):
"""
Wrapped function for the ReadPulseHeights command; socket is automatically opened/closed
...
From the T400B manual:
Poll command 2 (no <data> field) reads the 140 programmable waveform impulse
heights, each an unsigned 16-bit value (0 is zero height, 65535 is max). The slave
thus returns a 280-byte <data> field. The first two <data> bytes are the programmed
pulse height for the first 250 ps impulse segment, sent MS byte first.
"""
HDataQList=self._FunctionWrapper(self._ReadPulseHeights,{'ShowPlot':ShowPlot});
return HDataQList
def _WritePulseHeights(self, FileNameOrStringOrList=140*[0]):
"""
Bare function for the WritePulseHeights command; socket must be explicitly opened/closed
...
From the T400B manual:
Poll command 3 writes the 140 programmable pulse heights, each an unsigned
16-bit value. The poll <data> field is 280 bytes in size, with the first two bytes being
the programmed pulse height for the first 250 ps impulse segment, sent MS byte
first. The slave responds with the standard 9-byte ACK (no data) message.
"""
MyDataQ=''
if len(FileNameOrStringOrList) == 140*4:#will accept pre-formatted Hex2Byte text
MyDataQ=FileNameOrStringOrList
elif len(FileNameOrStringOrList)==140:#will accept a straight list
for value in range(len(FileNameOrStringOrList)):
MyDataQ+=self._Hex2Byte(int(FileNameOrStringOrList[value]))
elif FileNameOrStringOrList.endswith(('.txt','.csv','.dat')):
with open(FileNameOrStringOrList,'r') as filehead:
RawListQ=filehead.read()
if '\r\n' in RawListQ:
ListedValues=RawListQ.split('\r\n')
elif '\n' in RawListQ:
ListedValues=RawListQ.split('\n')
elif ',' in RawListQ:
ListedValues=RawListQ.split(',')
else:
print('Unrecognized format on input file.')
return False
if len(ListedValues) != 140:
print('File must have 140 entries; entry count: '+str(len(ListedValues)))
return False
for value in range(len(ListedValues)):
MyDataQ+=self._Hex2Byte(int(ListedValues[value]))
else:
print('Bad file entry count: '+str(len(FileNameOrStringOrList)))
return False
HDataQ=self._BaseFunction(self._HighlandSocket,self._HIGHLAND_SLAVE_ADDRESS,CommandCode=3,PollLength=289, ReplyLength=9,MyData=MyDataQ)
return HDataQ
def WritePulseHeights(self,FileNameOrStringOrList=140*[0]):
"""
Wrapped function for the WritePulseHeights command; socket is automatically opened/closed
...
From the T400B manual:
Poll command 3 writes the 140 programmable pulse heights, each an unsigned
16-bit value. The poll <data> field is 280 bytes in size, with the first two bytes being
the programmed pulse height for the first 250 ps impulse segment, sent MS byte
first. The slave responds with the standard 9-byte ACK (no data) message.
"""
HDataQ=self._FunctionWrapper(self._WritePulseHeights,{'FileNameOrStringOrList':FileNameOrStringOrList});
return HDataQ
    def _ReadFiducialImpulseSettings(self):
        """
        Bare function for the ReadFiducialImpulseSettings command; socket must be explicitly opened/closed
        Prints the decoded height and delay values and returns the raw hex data string
        ...
        From the T400B manual:
        Poll command 4 (no <data> field) reads two unsigned integers: the 16-bit value
        which determines the height of the auxiliary fiducial impulse generator and a second
        16-bit value which determines its delay. Each value is returned in a two-byte reply
        data field. Values range from 0 to +65535. The LSB of the time setting is 1 ps. If the
        first value (impulse amplitude) is zero, the impulse circuitry will be disabled.
        The fiducial impulse is of fixed width (nom 100 ps) and is summed into the main
        140-point modulator waveform.
        """
        print('**READ FIDUCIAL IMPULSE SETTINGS**')
        HDataQ=self._BaseFunction(self._HighlandSocket,self._HIGHLAND_SLAVE_ADDRESS,CommandCode=4,PollLength=9, ReplyLength=13,MyData='')
        print('Fiducial pulse height (max 65535): '+str(int(HDataQ[:4],16)))#first 16-bit value: amplitude (0 disables)
        print('Fiducial pulse delay: '+str(int(HDataQ[4:8],16)))#second 16-bit value: delay, LSB = 1 ps
        print('****')
        return HDataQ
def ReadFiducialImpulseSettings(self):
"""
Wrapped function for the ReadFiducialImpulseSettings command; socket is automatically opened/closed
...
From the T400B manual:
Poll command 4 (no <data> field) reads two unsigned integers: the 16-bit value
which determines the height of the auxiliary fiducial impulse generator and a second
16-bit value which determines its delay. Each value is returned in a two-byte reply
data field. Values range from 0 to +65535. The LSB of the time setting is 1 ps. If the
first value (impulse amplitude) is zero, the impulse circuitry will be disabled.
The fiducial impulse is of fixed width (nom 100 ps) and is summed into the main
140-point modulator waveform.
"""
HDataQ=self._FunctionWrapper(self._ReadFiducialImpulseSettings);
return HDataQ
def _WriteFiducialImpulseSettings(self, AmpReq=0, TimeReq=0):
    """
    Bare WriteFiducialImpulseSettings command; the socket must be explicitly
    opened/closed.

    Per the T400B manual, poll command 5 writes the fiducial impulse settings:
    the 4-byte <data> field carries the impulse height (AmpReq) and delay
    (TimeReq). Returns the raw reply string (9-byte ACK).
    """
    print('**WRITE FIDUCIAL IMPULSE SETTINGS**')
    payload = self._Hex2Byte(int(AmpReq)) + self._Hex2Byte(int(TimeReq))
    reply = self._BaseFunction(
        self._HighlandSocket, self._HIGHLAND_SLAVE_ADDRESS,
        CommandCode=5, PollLength=13, ReplyLength=9, MyData=payload)
    print('****')
    return reply
def WriteFiducialImpulseSettings(self, AmpReq=0, TimeReq=0):
    """
    Wrapped WriteFiducialImpulseSettings command; the socket is opened and
    closed automatically.

    AmpReq sets the fiducial impulse height and TimeReq its delay; both are
    packed into the 4-byte <data> field of T400B poll command 5.
    """
    return self._FunctionWrapper(self._WriteFiducialImpulseSettings,
                                 {'AmpReq': AmpReq, 'TimeReq': TimeReq})
def _WriteEnableByte(self, EnableTotal=4):
    """
    Bare WriteEnableByte command; the socket must be explicitly opened/closed.

    EnableTotal is a bitmask built by summing, starting from 0:
        +1  Enable CPU self-trigger, 960 Hz (test mode)
        +2  Enable self-trigger, 20 kHz
        +4  Enable external triggers
        +8  Enable the BIAS generators

    Per the T400B manual, the poll <data> field carries this single ENABLE byte
    whose bits (0-3, as listed above) enable/disable the subsystems.
    Returns the raw reply string (9-byte ACK).
    """
    payload = self._Hex1Byte(EnableTotal)
    print('**WRITE ENABLE BYTE**')
    reply = self._BaseFunction(
        self._HighlandSocket, self._HIGHLAND_SLAVE_ADDRESS,
        CommandCode=10, PollLength=10, ReplyLength=9, MyData=payload)
    print('****')
    return reply
def WriteEnableByte(self, EnableTotal=4):
    """
    Wrapped WriteEnableByte command; the socket is opened and closed automatically.

    EnableTotal is a bitmask built by summing, starting from 0:
        +1  Enable CPU self-trigger, 960 Hz (test mode)
        +2  Enable self-trigger, 20 kHz
        +4  Enable external triggers
        +8  Enable the BIAS generators

    The value is written as the T400B's single ENABLE byte (poll command 10).
    """
    return self._FunctionWrapper(self._WriteEnableByte,
                                 {'EnableTotal': EnableTotal})
def _ReadT0Delay(self):
    """
    Bare ReadT0Delay command; the socket must be explicitly opened/closed.

    Per the T400B manual, the <data> field returns the current 16-bit T0 delay
    (LSB 1 ps nominal, legal max 50,000 i.e. 50 ns). This delay is applied to
    the T400 trigger and shifts all other timed events; the earliest square or
    fiducial pulse edge occurs 20 ns after T0, the first arb glitch at T0+25 ns.

    Returns the delay as an int.
    """
    print('**READ T0 DELAY**')
    reply = self._BaseFunction(
        self._HighlandSocket, self._HIGHLAND_SLAVE_ADDRESS,
        CommandCode=13, PollLength=9, ReplyLength=11, MyData='')
    delay = int(reply, 16)
    print('T0 delay (max 50000 (50ns)): ' + str(delay))
    print('****')
    return delay
def ReadT0Delay(self):
    """
    Wrapped ReadT0Delay command; the socket is opened and closed automatically.

    Returns the current 16-bit T0 delay as an int (LSB 1 ps nominal, legal max
    50,000 = 50 ns); see _ReadT0Delay for the manual description.
    """
    return self._FunctionWrapper(self._ReadT0Delay)
def _ReadWaveAmplitudeCalibrations(self):
    """
    Bare ReadWaveAmplitudeCalibrations command; the socket must be explicitly
    opened/closed.

    Per the T400B manual, this reads the waveform impulse calibration table:
    140 integers, one per impulse of the arbitrary waveform, each scaling its
    impulse's amplitude (nominal ~2800 decimal). Factory calibrated — values
    should not need altering unless a segment board is replaced.

    Returns the table as a list of 140 ints.
    """
    print('**READ WAVE AMPLITUDE CALIBRATIONS**')
    reply = self._BaseFunction(
        self._HighlandSocket, self._HIGHLAND_SLAVE_ADDRESS,
        CommandCode=15, PollLength=9, ReplyLength=289, MyData='')
    # reply is a hex string: four hex chars per 16-bit value
    cals = [int(reply[4 * ii:4 * ii + 4], 16) for ii in range(len(reply) // 4)]
    print('Wave amplitude calibration (nom 2800): ' + str(cals))
    print('****')
    return cals
def ReadWaveAmplitudeCalibrations(self):
    """
    Wrapped ReadWaveAmplitudeCalibrations command; the socket is opened and
    closed automatically.

    Returns the waveform impulse calibration table as a list of 140 ints
    (nominal ~2800 each); see _ReadWaveAmplitudeCalibrations for details.
    """
    return self._FunctionWrapper(self._ReadWaveAmplitudeCalibrations)
def _WriteWaveAmplitudeCalibrations(self, StringOrList):
    """
    Bare WriteWaveAmplitudeCalibrations command; the socket must be explicitly
    opened/closed.

    StringOrList : either a pre-formatted hex payload of length 140*4 (four hex
        chars per value, as produced by _Hex2Byte) or a sequence of 140 integer
        calibration values (nominal ~2800; see ReadWaveAmplitudeCalibrations).

    Returns the raw reply string (9-byte ACK), or None (after printing a
    message) if the entry count is wrong.
    """
    if len(StringOrList) == 140 * 4:  # will accept pre-formatted Hex2Byte text
        MyDataQ = StringOrList
    elif len(StringOrList) == 140:  # will accept a straight list
        # join is linear; the old index-loop += built the string quadratically
        MyDataQ = ''.join(self._Hex2Byte(int(value)) for value in StringOrList)
    else:
        print('Bad file entry count: ' + str(len(StringOrList)))
        return
    print('**WRITE WAVE AMPLITUDE CALIBRATIONS**')
    HDataQ = self._BaseFunction(
        self._HighlandSocket, self._HIGHLAND_SLAVE_ADDRESS,
        CommandCode=16, PollLength=289, ReplyLength=9, MyData=MyDataQ)
    print('****')
    return HDataQ
def WriteWaveAmplitudeCalibrations(self, StringOrList):
    """
    Wrapped WriteWaveAmplitudeCalibrations command; the socket is opened and
    closed automatically.

    StringOrList is either a pre-formatted 140*4-char hex payload or a sequence
    of 140 integer calibration values; see ReadWaveAmplitudeCalibrations.
    """
    return self._FunctionWrapper(self._WriteWaveAmplitudeCalibrations,
                                 {'StringOrList': StringOrList})
def _ReadWaveTimeCalibrations(self):
    """
    Bare ReadWaveTimeCalibrations command; the socket must be explicitly
    opened/closed.

    Per the T400B manual, this reads the waveform glitch timing table: 140
    integers giving the time of each glitch of the arbitrary waveform. Each of
    the ten segment boards generates 14 glitches spaced 250 ps apart, so the
    table is ten sets of 14 integers, each set ascending from ~1000 to ~2755 in
    steps of ~135 with LSB weight ~1.8 ps. The manual recommends occasional
    online recalibration of the glitch spacings, since drifts as small as 20 ps
    can noticeably distort synthesized waveforms.

    Returns the table as a list of 140 ints.
    """
    print('**READ WAVE TIME CALIBRATIONS**')
    reply = self._BaseFunction(
        self._HighlandSocket, self._HIGHLAND_SLAVE_ADDRESS,
        CommandCode=17, PollLength=9, ReplyLength=289, MyData='')
    # reply is a hex string: four hex chars per 16-bit value
    cals = [int(reply[4 * ii:4 * ii + 4], 16) for ii in range(len(reply) // 4)]
    print('Wave time calibrations (max 65535): ' + str(cals))
    print('****')
    return cals
def ReadWaveTimeCalibrations(self):
    """
    Wrapped ReadWaveTimeCalibrations command; the socket is opened and closed
    automatically.

    Returns the waveform glitch timing table as a list of 140 ints (ten sets of
    14 ascending values, LSB ~1.8 ps); see _ReadWaveTimeCalibrations for the
    manual description and recalibration advice.
    """
    return self._FunctionWrapper(self._ReadWaveTimeCalibrations)
def _WriteWaveTimeCalibrations(self, StringOrList):
    """
    Bare WriteWaveTimeCalibrations command; the socket must be explicitly
    opened/closed.

    StringOrList : either a pre-formatted hex payload of length 140*4 (four hex
        chars per value, as produced by _Hex2Byte) or a sequence of 140 integer
        glitch-time values (see ReadWaveTimeCalibrations).

    Returns the raw reply string (9-byte ACK), or None (after printing a
    message) if the entry count is wrong.
    """
    if len(StringOrList) == 140 * 4:  # will accept pre-formatted Hex2Byte text
        MyDataQ = StringOrList
    elif len(StringOrList) == 140:  # will accept a straight list
        # join is linear; the old index-loop += built the string quadratically
        MyDataQ = ''.join(self._Hex2Byte(int(value)) for value in StringOrList)
    else:
        print('Bad file entry count: ' + str(len(StringOrList)))
        return
    print('**WRITE WAVE TIME CALIBRATIONS**')
    HDataQ = self._BaseFunction(
        self._HighlandSocket, self._HIGHLAND_SLAVE_ADDRESS,
        CommandCode=18, PollLength=289, ReplyLength=9, MyData=MyDataQ)
    print('****')
    return HDataQ
def WriteWaveTimeCalibrations(self, StringOrList):
    """
    Wrapped WriteWaveTimeCalibrations command; the socket is opened and closed
    automatically.

    StringOrList is either a pre-formatted 140*4-char hex payload or a sequence
    of 140 integer glitch-time values; see ReadWaveTimeCalibrations.
    """
    return self._FunctionWrapper(self._WriteWaveTimeCalibrations,
                                 {'StringOrList': StringOrList})
def _ReadMiscellaneousCalibrations(self):
    """
    Bare ReadMiscellaneousCalibrations command; the socket must be explicitly
    opened/closed.

    Per the T400B manual, this reads 36 unsigned 16-bit integers MC0-MC35. The
    first 20 are paired (coarse, fine) time settings positioning the ten
    waveform segment boards 3.5 ns apart; the rest are:
        MC20  T0 delay time slope
        MC21  T0 delay base time offset
        MC22  Square pulse amplitude calibrator
        MC23  Square pulse amplitude baseline
        MC24  Square pulse delay time slope cal
        MC25  Square pulse base time offset
        MC26  Fiducial impulse amplitude calibrator
        MC27  Fiducial impulse amplitude baseline
        MC28  Fiducial impulse time slope
        MC29  Fiducial impulse base time offset
        MC30-35  Spares

    Returns the table as a list of 36 ints.
    """
    print('**READ MISCELLANEOUS CALIBRATIONS**')
    reply = self._BaseFunction(
        self._HighlandSocket, self._HIGHLAND_SLAVE_ADDRESS,
        CommandCode=19, PollLength=9, ReplyLength=81, MyData='')
    # reply is a hex string: four hex chars per 16-bit value
    cals = [int(reply[4 * ii:4 * ii + 4], 16) for ii in range(len(reply) // 4)]
    print('Miscellaneous calibrations: ' + str(cals))
    print('****')
    return cals
def ReadMiscellaneousCalibrations(self):
    """
    Wrapped ReadMiscellaneousCalibrations command; the socket is opened and
    closed automatically.

    Returns the list of 36 unsigned 16-bit calibration factors MC0-MC35
    (paired coarse/fine board delays, T0/square-pulse/fiducial slope and
    offset cals, spares); see _ReadMiscellaneousCalibrations for the full
    MC-by-MC breakdown from the manual.
    """
    return self._FunctionWrapper(self._ReadMiscellaneousCalibrations)
def _WriteMiscellaneousCalibrations(self, StringOrList):
    """
    Bare WriteMiscellaneousCalibrations command; the socket must be explicitly
    opened/closed.

    StringOrList : either a pre-formatted hex payload of length 36*4 (four hex
        chars per value, as produced by _Hex2Byte) or a sequence of 36 integer
        calibration values MC0-MC35 (see ReadMiscellaneousCalibrations).

    Returns the raw reply string (9-byte ACK), or None (after printing a
    message) if the entry count is wrong.
    """
    if len(StringOrList) == 36 * 4:  # will accept pre-formatted Hex2Byte text
        MyDataQ = StringOrList
    elif len(StringOrList) == 36:  # will accept a straight list
        # join is linear; the old index-loop += built the string quadratically
        MyDataQ = ''.join(self._Hex2Byte(int(value)) for value in StringOrList)
    else:
        print('Bad file entry count: ' + str(len(StringOrList)))
        return
    print('**WRITE MISCELLANEOUS CALIBRATIONS**')
    HDataQ = self._BaseFunction(
        self._HighlandSocket, self._HIGHLAND_SLAVE_ADDRESS,
        CommandCode=20, PollLength=81, ReplyLength=9, MyData=MyDataQ)
    print('****')
    return HDataQ
def WriteMiscellaneousCalibrations(self, StringOrList):
    """
    Wrapped WriteMiscellaneousCalibrations command; the socket is opened and
    closed automatically.

    StringOrList is either a pre-formatted 36*4-char hex payload or a sequence
    of 36 integer values MC0-MC35; see ReadMiscellaneousCalibrations.
    """
    return self._FunctionWrapper(self._WriteMiscellaneousCalibrations,
                                 {'StringOrList': StringOrList})
def _ReadWalkTable(self):
    """
    Bare ReadWalkTable command; the socket must be explicitly opened/closed.

    Per the T400B manual, this reads the time walk compensation table: 32
    unsigned integers compensating for a small interaction between arb glitch
    heights and glitch centroid timing.

    Returns the table as a list of 32 ints.
    """
    print('**READ WALK TABLE**')
    reply = self._BaseFunction(
        self._HighlandSocket, self._HIGHLAND_SLAVE_ADDRESS,
        CommandCode=25, PollLength=9, ReplyLength=73, MyData='')
    # reply is a hex string: four hex chars per 16-bit value
    walk = [int(reply[4 * ii:4 * ii + 4], 16) for ii in range(len(reply) // 4)]
    print('Walk Table: ' + str(walk))
    print('****')
    return walk
def ReadWalkTable(self):
    """
    Wrapped ReadWalkTable command; the socket is opened and closed automatically.

    Returns the time walk compensation table as a list of 32 ints, which
    compensates for the interaction between arb glitch heights and glitch
    centroid timing; see _ReadWalkTable.
    """
    return self._FunctionWrapper(self._ReadWalkTable)
def _WriteWalkTable(self, StringOrList):
    """
    Bare WriteWalkTable command; the socket must be explicitly opened/closed.

    StringOrList : either a pre-formatted hex payload of length 32*4 (four hex
        chars per value, as produced by _Hex2Byte) or a sequence of 32 integer
        walk-compensation values (see ReadWalkTable).

    Returns the raw reply string (9-byte ACK), consistent with the other
    _Write* commands, or None (after printing a message) if the entry count is
    wrong. (Previously this method alone parsed the data-less ACK into a list,
    which always produced an empty/meaningless result.)
    """
    if len(StringOrList) == 32 * 4:  # will accept pre-formatted Hex2Byte text
        MyDataQ = StringOrList
    elif len(StringOrList) == 32:  # will accept a straight list
        # join is linear; the old index-loop += built the string quadratically
        MyDataQ = ''.join(self._Hex2Byte(int(value)) for value in StringOrList)
    else:
        print('Bad file entry count: ' + str(len(StringOrList)))
        return
    print('**WRITE WALK TABLE**')
    HDataQ = self._BaseFunction(
        self._HighlandSocket, self._HIGHLAND_SLAVE_ADDRESS,
        CommandCode=26, PollLength=73, ReplyLength=9, MyData=MyDataQ)
    print('****')
    return HDataQ
def WriteWalkTable(self, StringOrList):
    """
    Wrapped WriteWalkTable command; the socket is opened and closed automatically.

    StringOrList is either a pre-formatted 32*4-char hex payload or a sequence
    of 32 integer walk-compensation values; see ReadWalkTable.
    """
    return self._FunctionWrapper(self._WriteWalkTable,
                                 {'StringOrList': StringOrList})
def IndFETWave(self, ListOfPixels, WriteValue):
    """
    Build a 140-pixel pulse-height string with WriteValue at the requested
    pixels and zero everywhere else.

    ListOfPixels : iterable of 1-indexed pixel numbers (1-140) to set
    WriteValue : value written (via _Hex2Byte) at each listed pixel

    Primarily used to write individual-pixel patterns on the Highland for
    calibration; historically only used when recalibrating, which is rare.
    Returns the concatenated hex string (four chars per pixel).
    """
    # pixels are 1-indexed: loop index ii corresponds to pixel ii+1
    # (join replaces the old manual while-counter + string += loop)
    return ''.join(
        self._Hex2Byte(WriteValue) if (ii + 1) in ListOfPixels else '0000'
        for ii in range(140))
# #FIX THIS!!!
# def ___FETsurvey(HSQ,LS1Q):
# #HSQ=HOpen()
# #time.sleep(.15)
# #LS1Q=LOpen()
# #time.sleep(.15)
# qdatalist=[]
# for ii in range(140):
# WritePulseHeights(HSQ,0,IndFETWave([ii+1],28000))#could improve by doing a few spread-out points at a time
# time.sleep(6)
# qpixnodata=readchan(1,LS1Q)['DATA']
# qpixnomax=max(qpixnodata)
# qpixnomaxindex=np.mean([i for i,j in enumerate(qpixnodata) if j == qpixnomax])##could improve by changing to abbreviated centroid around peak, avoiding tail-end bump
# qdatalist.append([qpixnomaxindex,qpixnomax])
# #time.sleep(.15)
# #HClose(HSQ)
# #time.sleep(.15)
# #LClose(LS1Q)
# #time.sleep(.15)
# return qdatalist
# =============================================================================
# =============================================================================
# #FIX THIS!!!
# def ___FastFETSurvey(HSQ,LS1Q):
# #HSQ=HOpen()
# #time.sleep(.15)
# #LS1Q=LOpen()
# #time.sleep(.15)
# qdatalist=[]
# for ii in range(10):
# WritePulseHeights(HSQ,0,IndFETWave([jj*10+ii+1 for jj in range(14)],28000))#could improve by doing a few spread-out points at a time
# time.sleep(6)
# qpixnodata=readchan(1,LS1Q)['DATA']
# qpeakcoords=signal.find_peaks_cwt(np.clip(qpixnodata,max(qpixnodata)/5,max(qpixnodata)),np.arange(180,200))#threshold->clip
# #this is 475ps/(2.5ps/pix)=190pix expected for S1 scope at max sampling; S2 scope needs different
# if len(qpeakcoords) != 15:
# print('\nWrong number of peaks detected!\n',len(qpeakcoords))
# return
# qdatalist.append([qpeakcoords[1:],qpixnodata[qpeakcoords[1:]]])
# #time.sleep(.15)
# #HClose(HSQ)
# #time.sleep(.15)
# #LClose(LS1Q)
# #time.sleep(.15)
# qdatalist2=np.array(qdatalist).transpose(2,0,1).reshape(140,2)
# qTimeErrInHpix=np.array([qdatalist2[ii,0]-np.mean(qdatalist2[:14,0])+100*(6.5-ii) for ii in range(140)])*2.5/1.8
# qTimeErrInHpixBoardAvg=np.array([np.mean(qTimeErrInHpix[14*ii:14*ii+14]) for ii in range(10)])
# epl(qTimeErrInHpix)
# epl(qTimeErrInHpixBoardAvg)
# return np.array([qTimeErrInHpix,qTimeErrInHpixBoardAvg])
# =============================================================================
# =============================================================================
# #FIX THIS!!!
# def ___VeryFastFETSurvey(HSQ,LS1Q):#returns np.array([qTimeErrInHpix,qTimeErrInHpixBoardAvg])
# #HSQ=HOpen()
# #time.sleep(.15)
# #LS1Q=LOpen()
# #time.sleep(.15)
# qdatalist=[]
# qrfis=ReadFiducialImpulseSettings(HSQ,0)
# WriteFiducialImpulseSettings(HSQ,0,4000,45000)#try full-time 15000,45000
# time.sleep(.15)
# for ii in range(4):
# WritePulseHeights(HSQ,0,IndFETWave([jj*4+ii+1 for jj in range(35)],15000))#could improve by doing a few spread-out points at a time
# time.sleep(6)
# qpixnodata=readchan(1,LS1Q)['DATA']
# #qpeakcoords=signal.find_peaks_cwt(np.clip(qpixnodata,max(qpixnodata)/4,max(qpixnodata)),np.arange(90,110))#threshold->clip
# qpeakcoordspre=signal.find_peaks_cwt(np.clip(qpixnodata,max(qpixnodata)/4,max(qpixnodata))-max(qpixnodata)/4,np.arange(90,110));
# qpeakcoords=[pt for pt in qpeakcoordspre if qpixnodata[pt]>max(qpixnodata)/4]
# #this is 475ps/(2.5ps/pix)=190pix expected for S1 scope at max sampling; S2 scope needs different
# if len(qpeakcoords) != 36:
# print('\nWrong number of peaks detected!\n',len(qpeakcoords))
# #time.sleep(.15)
# #HClose(HSQ)
# #time.sleep(.15)
# #LClose(LS1Q)
# #time.sleep(.15)
# return
# #qdatalist.append([qpeakcoords[1:],qpixnodata[qpeakcoords[1:]]])
# #try post-pulse trigger instead
# qdatalist.append([qpeakcoords[:-1],qpixnodata[qpeakcoords[:-1]]])
# #time.sleep(.15)
# #HClose(HSQ)
# #time.sleep(.15)
# #LClose(LS1Q)
# #time.sleep(.15)
# qdatalist2=np.array(qdatalist).transpose(2,0,1).reshape(140,2)
# qTimeErrInHpix=np.array([qdatalist2[ii,0]-np.mean(qdatalist2[:14,0])+100*(6.5-ii) for ii in range(140)])*2.5/1.8
# qTimeErrInHpixBoardAvg=np.array([np.mean(qTimeErrInHpix[14*ii:14*ii+14]) for ii in range(10)])
# epl(qTimeErrInHpix)
# epl(qTimeErrInHpixBoardAvg)
# return np.array([qTimeErrInHpix,qTimeErrInHpixBoardAvg])
# =============================================================================
# =============================================================================
# #FIX THIS!!!
# def ___MiscCalCorrection(HSQ,MiscCalOldQ,TimeErrInHpixBoardAvgQ):
# MiscCalNewQ=MiscCalOldQ[:]
# for ii in range(10):
# MiscCalNewQ[1+2*ii]=MiscCalNewQ[1+2*ii]-int(round(TimeErrInHpixBoardAvgQ[ii]))
# WriteMiscellaneousCalibrations(HSQ,0,MiscCalNewQ)
# time.sleep(.15)
# return MiscCalNewQ
# =============================================================================
# =============================================================================
# #FIX THIS!!!
# def ___WaveTimeCalCorrection(HSQ,WaveTimeCalOldQ,TimeErrInHpixQ):
# WaveTimeCalNewQ=WaveTimeCalOldQ[:]
# for ii in range(140):
# WaveTimeCalNewQ[ii]=WaveTimeCalNewQ[ii]-int(round(TimeErrInHpixQ[ii]))
# WriteWaveTimeCalibrations(HSQ,0,WaveTimeCalNewQ)
# time.sleep(.15)
# return WaveTimeCalNewQ
# =============================================================================
# =============================================================================
# #FIX THIS!!!
# def ___ScanAndShift(HSQ,LS1Q):
# #ideally: auto handling of sockets
# #need error handling/better stability of code first
# #HSQ=HOpen()
# #time.sleep(.15)
# #LS1Q=LOpen()
# #time.sleep(.15)
# #way to check scope settings? currently did this with the following:
# #5 mV/div, 10sweeps, 5ns/div, -33ns delay, 13ns deskew, -13.8mV offset
# ##### use on 20210209: on 5n/div on LeCroy1, trigger off another channel, set fiducial at 9th div
# #ideally: read YFE settings, turn down, turn back up after done
# #set fiducial and everything like that
# scanresults=[]
# #we don't care about historical values; we just want things fixed
# #so don't read in/pass in parameters; just get them straight from HSQ
# PulseHeightQ=ReadPulseHeights(HSQ,0)
# MiscCalQ=ReadMiscellaneousCalibrations(HSQ,0)
# time.sleep(.15)
# WaveTimeCalQ=ReadWaveTimeCalibrations(HSQ,0)
# time.sleep(.15)
# scanresults.append(VeryFastFETSurvey(HSQ,LS1Q))
# #test if need correction
# if any(abs(elem)>2.5 for elem in scanresults[-1][1]):
# print('Adjusting MiscCal\n')
# MiscCalQ=MiscCalCorrection(HSQ,MiscCalQ,scanresults[-1][1])
# time.sleep(.15)
# scanresults.append(VeryFastFETSurvey(HSQ,LS1Q))
# if any(abs(elem)>5.5 for elem in scanresults[-1][0]):
# #this is factor of 2 away from "bad" ("20ps"=11.1 Hpix of error)
# print('Adjusting WaveTimeCal\n')
# WaveTimeCalQ=WaveTimeCalCorrection(HSQ,WaveTimeCalQ,scanresults[-1][0])
# time.sleep(.15)
# scanresults.append(VeryFastFETSurvey(HSQ,LS1Q))
# if any(abs(elem)>2.5 for elem in scanresults[-1][1]) or any(abs(elem)>5.5 for elem in scanresults[-1][0]):
# print('Consider running a second iteration')
# #ideally: re-cal with for loops and iterate until corrected
# #time.sleep(.15)
# #HClose(HSQ)
# #time.sleep(.15)
# #LClose(LS1Q)
# #time.sleep(.15)
# WritePulseHeights(HSQ,0,PulseHeightQ)
# return
# =============================================================================
# =============================================================================
# #FIX THIS!!!
# def ___FETsurveyfull():
# HSQ=HOpen()
# time.sleep(.15)
# LS1Q=LXOpen('1')
# time.sleep(.15)
# qdatalist=[]
# for ii in range(140):
# qptdatalist=[]
# for jj in range(5):
# WritePulseHeights(HSQ,0,IndFETWave([ii+1],int((jj+1)*65535/5)))#could improve by doing a few spread-out points at a time
# time.sleep(24)
# qpixnodata=readchan(1,LS1Q)['DATA'][2400:]
# qpixnomax=max(qpixnodata)
# qpixnomaxindex=np.mean([i for i,j in enumerate(qpixnodata) if j == qpixnomax])##could improve by changing to abbreviated centroid around peak, avoiding tail-end bump
# qptdatalist.append([2400+qpixnomaxindex,qpixnomax])
# qdatalist.append(qptdatalist)
# efc.pickledump2(qdatalist,psfilepath()+'fullFETsurvey20181106.p')
# time.sleep(.15)
# HClose(HSQ)
# time.sleep(.15)
# LXClose(LS1Q)
# time.sleep(.15)
# return qdatalist
# =============================================================================
# =============================================================================
# #FIX THIS!!!
# def HParamReset():##need to fix pickle... !!!
# HSQ=HOpen()
# time.sleep(.15)
# [qrphQ,qrwacQ,qrmcQ,qrwtcQ,qrwtQ]=pickle.load(open(psfilepath()+'HighlandParameterSnapshot20181116.p','rb'))#1108 original
# WriteFiducialImpulseSettings(HSQ,0,0,0) #turn off fiducial; was at (HSQ,0,65535,0)
# time.sleep(.15)
# WritePulseHeights(HSQ,0,qrphQ)
# #time.sleep(.15)
# #WriteWaveAmplitudeCalibrations(HSQ,0,qrwacQ)
# #time.sleep(.15)
# #WriteMiscellaneousCalibrations(HSQ,0,qrmcQ)
# #time.sleep(.15)
# #WriteWaveTimeCalibrations(HSQ,0,qrwtcQ)
# #time.sleep(.15)
# #WriteWalkTable(HSQ,0,qrwtQ)
# #time.sleep(.15)
# time.sleep(.15)
# HClose(HSQ)
# time.sleep(.15)
# return
# =============================================================================
# =============================================================================
# #FIX THIS!!!
# def findfid(TraceInQ):
# #make it fast by narrowing area where we know peak should be
# TQ=TraceInQ[:]; maxTQ=max(TQ); minTQ=min(TQ);
# TQP=signal.find_peaks_cwt(np.clip(TQ,(maxTQ-.8*(maxTQ-minTQ)),maxTQ),np.arange(5,15));
# if (TQP[-1]-TQP[-2]) > 1000:
# return TQP[-1]
# else:
# print('check your answer...')
# return TQP[-1]
# =============================================================================
# =============================================================================
# def HParamSnapshot():
# """Once revived again, function will serve to save and preserve the various settings of the Highland"""
# pass
# =============================================================================
def FidOn(self, amplitude=20000, delay_ps=45125):
    """
    Shortcut for turning on the Highland's fiducial impulse, used for proper
    timing of the oscilloscope delay.

    amplitude : impulse height (usu. ~20000, depending on energy saturation in
        the YFE; range 0-65000)
    delay_ps : impulse delay (usu. 45125 for the correct location, but can be
        scanned to help e.g. calibrate a streak camera)

    Returns False if the write fails; None on success.
    """
    try:
        self.WriteFiducialImpulseSettings(amplitude, delay_ps)
    except Exception as exc:  # narrowed from bare except; still best-effort
        print('Error!')
        print(exc)  # surface the cause instead of silently swallowing it
        return False
    return
def FidOff(self):
    """
    Shortcut for turning off the Highland's fiducial impulse (writes zero
    amplitude and zero delay, which disables the impulse circuitry).

    Returns False if the write fails; None on success.
    """
    try:
        self.WriteFiducialImpulseSettings(0, 0)
    except Exception as exc:  # narrowed from bare except; still best-effort
        print('Error!')
        print(exc)  # surface the cause instead of silently swallowing it
        return False
    return
class LOSC:
"""
Class containing all the necessary functions for running the LeCroy oscilloscopes
Because we have several such scopes, instantiation of a certain device is required
Possible scope choices are:
my_scope=LOSC('A') #for the YFE, 1in1w, and SHG_opt diodes
my_scope=LOSC('B') #for the four 2in1w diodes
my_scope=LOSC('1') #for the instruments scientists' oscilloscope
my_scope=LOSC('2') #for the four 2in2w diodes
Unless speed is necessary, it is usually most appropriate to interface with a LeCroy simply by using LOSC('[1/2/A/B]').[command]
This will take care of all of the socket opening/closing by itself.
Example: read out all the current waveform amplitudes on scope '2' using wvfm4=LOSC('2').rchall()
Example: wait for a fresh acquisition before reading out channel 2's voltage vs time on scope '1' using ch2_wvfm=LOSC('1').waitrchxy(2)
(Alternatively, use the approach above: my_scope = LOSC('A') and then wvfm4=my_scope.rchall() or my_scope.pchxy(4) or whatever)
Possible commands that can be used as described above include:
:waitrch #wait and read specified channel amplitude
:waitrchxy #wait and read specified channel amplitude and time
:rch #immediately read specified channel amplitude
:rchxy #immediately read specified channel amplitude and time
:rchall #immediately read amplitude for all channels
:rchallxy #immediately read amplitude and time for all channels
:sch #save plot of specified channel amplitude to file
:schall #save plot of amplitude for all channels to file
:pch #plot specified channel amplitude
:pchxy #plot specified channel amplitude vs time
:pchall #plot amplitude of all channels
:pchallxy #plot amplitude vs time of all channels
:sumch #sum amplitude of specified channel
:save_scope_to_eLog #save specified channel amplitude to eLog
:RestoreConfig #restore acquisition settings according to internal memory
There are also functions available for use with bare sockets; these tend to start with underscores.
See the docstrings for more guidance.
Potential future work:
: adding more functionality from the LeCroy manual using the _ctrl function
"""
def __init__(self, LStrQ):
    """
    Initialize a LeCroy oscilloscope for use; possible choices are:
        LStrQ='A'  #for the YFE, 1in1w, and SHG_opt diodes
        LStrQ='B'  #for the four 2in1w diodes
        LStrQ='1'  #for the instruments scientists' oscilloscope
        LStrQ='2'  #for the four 2in2w diodes
        LStrQ='L'  #additional LeCroy scope

    Raises ValueError for an unrecognized scope name. (The previous
    `return False` was a bug: returning a non-None value from __init__
    raises an uninformative TypeError.)
    """
    key = str(LStrQ).lower()
    if key == 'a':
        self._hostIP = GLOBAL.LECROY_A_IP  # '172.21.46.100'#'scope-ics-meclas-lecroy-a'
        self._name = 'LeCroyA'
    elif key == 'b':
        self._hostIP = GLOBAL.LECROY_B_IP  # '172.21.46.120'#'scope-ics-meclas-lecroy-b'
        self._name = 'LeCroyB'
    elif key == '1':
        self._hostIP = GLOBAL.LECROY_1_IP  # '172.21.46.60'#'scope-ics-mectc1-1'
        self._name = 'LeCroy1'
    elif key == '2':
        self._hostIP = GLOBAL.LECROY_2_IP  # '172.21.46.128'#'scope-ics-meclas-lecroy-02'
        self._name = 'LeCroy2'
    elif key == 'l':
        self._hostIP = GLOBAL.LECROY_L_IP  # '172.21.160.252'
        self._name = 'LeCroyL'
    else:
        print('Invalid scope name! Choose 1, 2, A, or B!!')
        raise ValueError('Invalid scope name: ' + repr(LStrQ))
    self._LSock = None  # socket handle; populated by _Open()
    self._port = 1861   # LeCroy VICP remote-control port
def _Open(self):
    """
    Open the socket to the specified LeCroy; if called explicitly like this,
    it MUST be followed by a _Close() statement or else you'll block the socket
    and need to locally disable/enable its networking card (or power cycle the
    unit, but this is not preferred!!).

    Leaving the socket open allows quicker access to scope functions.
    Example: after my_scope = LOSC('1') and my_scope._Open(), one may use
    functions inside a loop, e.g.
        save_traces = [my_scope._waitrchxy(4) for ii in range(25)]
    to save 25 consecutive fresh traces without opening and closing the socket
    between each acquisition.
    WARNING: DO NOT FORGET to close the socket afterwards with my_scope._Close().

    There is an "underscored" version of most LOSC functions that can be used
    this way (e.g. _waitrchxy, _rchall, _pch); _ctrl allows programming the
    scope with VB commands from the LeCroy manual.
    """
    if self._LSock:
        print('Socket may already be open!')
        return
    try:
        self._LSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._LSock.settimeout(1.0)
        self._LSock.connect((self._hostIP, self._port))
    except Exception:  # narrowed from bare except
        # Drop the half-constructed socket; otherwise _LSock stays set and
        # every later _Open() wrongly reports the socket as already open.
        if self._LSock is not None:
            self._LSock.close()
        self._LSock = None
        print(self._name + ' NOT CONNECTED!')
def _Close(self):
    """
    Close the socket to the specified LeCroy if preceded by an _Open() statement.
    Use this after you have taken care of all of your business that you started
    with _Open().

    Example: my_scope = LOSC('1')
             my_scope._Open()
             #a bunch of consecutive scope calls using underscored commands
             my_scope._Close()
    """
    try:
        self._LSock.close()
        self._LSock = None
    except Exception:  # narrowed from bare except (best-effort close)
        print('Unable to close socket -- it may already be closed')
    return
def _send_and_reply(self,msg,SendOnly=False):
    """
    Generic utility for sending a poll to the specified LeCroy's internal processor and receiving its reply
    Not typically used in external scripts -- mostly just for creating the functions in this class
    Builds an 8-byte header (operation flags, version, sequence, spare, 4-byte
    big-endian length) followed by the command and a trailing newline, then, unless
    SendOnly is True, reads length-prefixed reply segments until the header's
    EOI/done bit is set.
    NOTE(review): returns the reply string on success, None when SendOnly=True,
    and None (after printing) on any failure.
    """
    try:
        x = bytearray()
        msg=bytearray(msg,'utf8')  # command must be sent as bytes, not str
        x.append(0x81)  # Data with EOI terminator
        x.append(1)  # Header v1
        x.append(0)  # Sequence Number
        x.append(0)  # Spare
        l = len(msg) + 1  # payload length includes the trailing newline
        x.append((l >> 24) & 0xff)  # MSB!
        x.append((l >> 16) & 0xff)
        x.append((l >> 8) & 0xff)
        x.append((l >> 0) & 0xff)
        x.extend(msg)
        x.extend(bytearray('\n','utf8'))  # extend (not append) -- newline as bytes
        self._LSock.sendall(x)
        if not SendOnly:
            data = ""
            done = False
            while not done:
                hdr = self._LSock.recv(8)  # a bytes object
                hdr = hdr.decode('latin1')  # latin1 keeps a 1:1 byte<->char mapping
                done = (ord(hdr[0]) & 1) == 1  # low bit of first header byte flags the last segment
                l = struct.unpack(">i", bytes(hdr[4:8],encoding='latin1'))[0]  # big-endian segment length
                while (l != 0):
                    # recv may return fewer than l bytes; loop until segment complete
                    d = self._LSock.recv(l)
                    d = d.decode('latin1')
                    data = data + d
                    l -= len(d)
            return data
    except:
        print('Send and reply failed!')
@staticmethod
def _LFields():
    """
    List of fields needed for interpreting read-out data of LeCroy fields
    Each entry is [byte_offset, field_name, type] with an optional fourth
    element mapping enum codes to labels; offsets index into the WAVEDESC
    descriptor block returned by a WAVEFORM? query.
    """
    fields = [
        [0,"DESCRIPTOR_NAME","string"],[16,"TEMPLATE_NAME","string"],[32,"COMM_TYPE","enum", {0:"byte",1: "word",}],
        [34,"COMM_ORDER","enum", {0:"HIFIRST",1:"LOFIRST",}],[36,"WAVE_DESCRIPTOR","long"],[40,"USER_TEXT","long"],
        [44,"RES_DESC1","long"],[48,"TRIGTIME_ARRAY","long"],[52,"RIS_TIME_ARRAY","long"],[56,"RES_ARRAY1","long"],
        [60,"WAVE_ARRAY_1","long"],[64,"WAVE_ARRAY_2","long"],[68,"RES_ARRAY2","long"],[72,"RES_ARRAY3","long"],
        [76,"INSTRUMENT_NAME","string"],[92,"INSTRUMENT_NUMBER","long"],[96,"TRACE_LABEL","string"],
        [112,"RESERVED1","word"],[114,"RESERVED2","word"],[116,"WAVE_ARRAY_COUNT","long"],[120,"PNTS_PER_SCREEN","long"],
        [124,"FIRST_VALID_PNT","long"],[128,"LAST_VALID_PNT","long"],[132,"FIRST_POINT","long"],
        [136,"SPARSING_FACTOR","long"],[140,"SEGMENT_INDEX","long"],[144,"SUBARRAY_COUNT","long"],
        [148,"SWEEPS_PER_ACQ","long"],[152,"POINTS_PER_PAIR","word"],[154,"PAIR_OFFSET","word"],
        [156,"VERTICAL_GAIN","float"],[160,"VERTICAL_OFFSET","float"],[164,"MAX_VALUE","float"],
        [168,"MIN_VALUE","float"],[172,"NOMINAL_BITS","word"],[174,"NOM_SUBARRAY_COUNT","word"],
        [176,"HORIZ_INTERVAL","float"],[180,"HORIZ_OFFSET","double"],[188,"PIXEL_OFFSET","double"],
        [196,"VERTUNIT","unit_definition"],[244,"HORUNIT","unit_definition"],[292,"HORIZ_UNCERTAINTY","float"],
        [296,"TRIGGER_TIME","time_stamp"],[312,"ACQ_DURATION","float"],
        [316,"RECORD_TYPE","enum",{0:"single_sweep",1:"interleaved",2:"histogram",3:"graph",4:"filter_coefficient",
            5:"complex",6:"extrema",7:"sequence_obsolete",8:"centered_RIS",9:"peak_detect",}],
        [318,"PROCESSING_DONE","enum",{0:"no_processing",1:"fir_filter",2:"interpolated",3:"sparsed",
            4:"autoscaled",5:"no_result",6:"rolling",7:"cumulative",}],
        [320,"RESERVED5","word"],[322,"RIS_SWEEPS","word"],
        [324,"TIMEBASE","enum",{0:"1_ps/div",1:"2_ps/div",2:"5_ps/div",3:"10_ps/div",4:"20_ps/div",5:"50_ps/div",
            6:"100_ps/div",7:"200_ps/div",8:"500_ps/div",9:"1_ns/div",10:"2_ns/div",11:"5_ns/div",12:"10_ns/div",
            13:"20_ns/div",14:"50_ns/div",15:"100_ns/div",16:"200_ns/div",17:"500_ns/div",18:"1_us/div",19:"2_us/div",
            20:"5_us/div",21:"10_us/div",22:"20_us/div",23:"50_us/div",24:"100_us/div",25:"200_us/div",26:"500_us/div",
            27:"1_ms/div",28:"2_ms/div",29:"5_ms/div",30:"10_ms/div",31:"20_ms/div",32:"50_ms/div",33:"100_ms/div",
            34:"200_ms/div",35:"500_ms/div",36:"1_s/div",37:"2_s/div",38:"5_s/div",39:"10_s/div",40:"20_s/div",
            41:"50_s/div",42:"100_s/div",43:"200_s/div",44:"500_s/div",45:"1_ks/div",46:"2_ks/div",47:"5_ks/div",
            100: "EXTERNAL",}],
        [326,"VERT_COUPLING","enum",{0:"DC_50_Ohms",1:"ground",2:"DC_1MOhm",3:"ground",4:"AC_1MOhm",}],
        [328,"PROBE_ATT","float"],
        [332,"FIXED_VERT_GAIN","enum",{0:"1_uV/div",1:"2_uV/div",2:"5_uV/div",3:"10_uV/div",4:"20_uV/div",
            5:"50_uV/div",6:"100_uV/div",7:"200_uV/div",8:"500_uV/div",9:"1_mV/div",10:"2_mV/div",11:"5_mV/div",
            12:"10_mV/div",13:"20_mV/div",14:"50_mV/div",15:"100_mV/div",16:"200_mV/div",17:"500_mV/div",
            18:"1_V/div",19:"2_V/div",20:"5_V/div",21:"10_V/div",22:"20_V/div",23:"50_V/div",24:"100_V/div",
            25:"200_V/div",26:"500_V/div",27:"1_kV/div",}],
        [334,"BANDWIDTH_LIMIT","enum",{0:"off",1:"on",}],[336,"VERTICAL_VERNIER","float"],[340,"ACQ_VERT_OFFSET","float"],
        [344,"WAVE_SOURCE","enum",{0:"CHANNEL_1",1:"CHANNEL_2",2:"CHANNEL_3",3:"CHANNEL_4",9:"UNKNOWN",}],]
    return fields
@classmethod
def _parsewf(cls, data, verbose=False):
    """
    Internal function needed for parsing read-out data of LeCroy fields
    Locates the ',#9' length marker in the raw WAVEFORM? reply, slices out the
    descriptor+data payload, decodes every descriptor field per _LFields(), and
    returns a dict of field values plus 'RAW' (integer samples) and 'DATA'
    (samples scaled to volts via VERTICAL_GAIN and VERTICAL_OFFSET).
    """
    fields = cls._LFields()
    x = data.find(",#9")
    l = int(data[x+3:x+12])  # 9-digit ASCII byte count following '#9'
    data = data[x+12:x+12+l]
    d = {}
    for f in fields:
        # f = [offset, name, type, (enum map)]; data is a latin1 str, so each
        # char maps 1:1 to a byte and must be re-encoded before struct.unpack
        if f[2] == "string" or f[2] == "unit_definition" or f[2] == "text":
            d[f[1]] = data[f[0]:f[0]+16].rstrip('\0')
            if (verbose): print("%30s %s" % (f[1], d[f[1]]))
        elif f[2] == "enum":
            d[f[1]] = f[3][struct.unpack("<h", bytes(data[f[0]:f[0]+2],encoding='latin1'))[0]]
            if (verbose): print("%30s %s" % (f[1], d[f[1]]))
        elif f[2] == "word":
            d[f[1]] = struct.unpack("<h", bytes(data[f[0]:f[0]+2],encoding='latin1'))[0]
            if (verbose): print("%30s %s" % (f[1], d[f[1]]))
        elif f[2] == "long":
            d[f[1]] = struct.unpack("<i", bytes(data[f[0]:f[0]+4],encoding='latin1'))[0]
            if (verbose): print("%30s %i" % (f[1], d[f[1]]))
        elif f[2] == "float":
            d[f[1]] = struct.unpack("<f", bytes(data[f[0]:f[0]+4],encoding='latin1'))[0]
            if (verbose): print("%30s %g" % (f[1], d[f[1]]))
        elif f[2] == "double":
            d[f[1]] = struct.unpack("<d", bytes(data[f[0]:f[0]+8],encoding='latin1'))[0]
            if (verbose): print("%30s %g" % (f[1], d[f[1]]))
        elif f[2] == "time_stamp":
            # hour:minute:second day/month/year assembled from packed bytes
            d[f[1]] = "{}:{}:{} {}/{}/{}".format(data[f[0]+9],
                                                 data[f[0]+8],
                                                 struct.unpack("<d", bytes(data[f[0]:f[0]+8],encoding='latin1'))[0],
                                                 data[f[0]+11],
                                                 data[f[0]+10],
                                                 struct.unpack("<h", bytes(data[f[0]+12:f[0]+14],encoding='latin1'))[0])
            if (verbose): print("%30s %s" % (f[1], d[f[1]]))
        else:
            if (verbose): print("***** %24s %s" % (f[1], f[2]))
    # COMM_TYPE (offset 32) == 0 means byte samples, otherwise 16-bit words
    if struct.unpack("<h", bytes(data[32:34],encoding='latin1'))[0] == 0:
        d['RAW'] = np.frombuffer(bytes(data[346:],encoding='latin1'), dtype=np.int8)
    else:
        d['RAW'] = np.frombuffer(bytes(data[346:],encoding='latin1'), dtype=np.int16)
    d['DATA'] = d['VERTICAL_GAIN'] * d['RAW'] - d['VERTICAL_OFFSET']
    return d
def _FunctionWrapper(self,FuncQ,kwargs={}):
"""
A function wrapper that allows one to call each LeCroy command directly without having to worry about opening/closing sockets;
if issuing one-off commands that don't require high-frequency consistent execution, this is sufficient
In short, this automatically wraps an _Open() and _Close() statement around an "underscored" function (like _rchall)
in order to create a function (like rchall) that can be used without explicitly worrying about good socket habits
"""
try:
self._Open();time.sleep(0.15);
LData=FuncQ(**kwargs);time.sleep(0.15);
self._Close();time.sleep(0.15);
except:
self._Close();time.sleep(0.15);
LData=False
return LData
def _waitrch(self,ChannelNo,verbose=False):
"""
Bare function for reading voltage data from the specified channel after reading the internal state change register;
socket must be explicitly opened/closed
"""
while True:
ready = (int(self._send_and_reply("INR?").split()[1]) & 1) == 1
if ready:
rawdataq = self._send_and_reply("C{}:WAVEFORM? ALL".format(str(ChannelNo)))
fullaq = self._parsewf(rawdataq, verbose)
return fullaq
def waitrch(self, ChannelNo, verbose=False):
    """
    Wrapped function for reading voltage data from the specified channel after
    reading the internal state change register; socket is automatically opened/closed.
    """
    return self._FunctionWrapper(self._waitrch,
                                 {'ChannelNo': ChannelNo, 'verbose': verbose})
def _waitrchxy(self,ChannelNo,verbose=False):
"""
Bare function for reading voltage data and corresponding time data from the specified channel after reading the internal state change register;
socket must be explicitly opened/closed
"""
while True:
ready = (int(self._send_and_reply("INR?").split()[1]) & 1) == 1
if ready:
rawdataq = self._send_and_reply("C{}:WAVEFORM? ALL".format(str(ChannelNo)))
fullaq = self._parsewf(rawdataq, verbose)
yvals=fullaq['DATA'];xvals=[fullaq['HORIZ_OFFSET'] + ii*fullaq['HORIZ_INTERVAL'] for ii in range(len(fullaq['DATA']))];
return [xvals,yvals]
def waitrchxy(self, ChannelNo, verbose=False):
    """
    Wrapped function for reading voltage data and corresponding time data from the
    specified channel after reading the internal state change register; socket is
    automatically opened/closed.
    """
    return self._FunctionWrapper(self._waitrchxy,
                                 {'ChannelNo': ChannelNo, 'verbose': verbose})
def _rch(self,OChan):
"""
Bare function for reading voltage data from the specified channel;
socket must be explicitly opened/closed
"""
rawdataq = self._send_and_reply("C{}:WAVEFORM? ALL".format(str(OChan)))
return self._parsewf(rawdataq, False)['DATA']
def rch(self, OChan):
    """
    Wrapped function for reading voltage data from the specified channel;
    socket is automatically opened/closed.
    """
    return self._FunctionWrapper(self._rch, {'OChan': OChan})
def _rchxy(self,OChan):
"""
Bare function for reading voltage data and corresponding time data from the specified channel;
socket must be explicitly opened/closed
"""
rawdataq = self._send_and_reply("C{}:WAVEFORM? ALL".format(str(OChan)))
fullaq=self._parsewf(rawdataq, False);
yvals=fullaq['DATA'];xvals=[fullaq['HORIZ_OFFSET'] + ii*fullaq['HORIZ_INTERVAL'] for ii in range(len(fullaq['DATA']))];
return [xvals,yvals]
def rchxy(self, OChan):
    """
    Wrapped function for reading voltage data and corresponding time data from the
    specified channel; socket is automatically opened/closed.
    """
    return self._FunctionWrapper(self._rchxy, {'OChan': OChan})
def _rchall(self):
"""
Bare function for reading voltage data from all channels of specified scope;
socket must be explicitly opened/closed
"""
rchans=[]
for OChan in range(1,5):
rchans.append(self._rch(OChan))
return rchans
def rchall(self):
    """
    Wrapped function for reading voltage data from all channels of specified scope;
    socket is automatically opened/closed.
    """
    return self._FunctionWrapper(self._rchall)
def _rchallxy(self):
"""
Bare function for reading voltage data and corresponding time data from all channels of specified scope;
socket must be explicitly opened/closed
"""
rchans=[]
for OChan in range(1,5):
rchans.append(self._rchxy(OChan))
return rchans
def rchallxy(self):
    """
    Wrapped function for reading voltage data and corresponding time data from all
    channels of specified scope; socket is automatically opened/closed.
    """
    return self._FunctionWrapper(self._rchallxy)
def _sch(self, OChan, FileName):
    """
    Bare function for saving voltage data from the specified channel;
    socket must be explicitly opened/closed.
    Writes <FileName>.csv (one sample per line) and <FileName>-h.csv
    (full parsed-waveform header dict) under GLOBAL.PSFILEPATH/data/.
    """
    parsed = self._parsewf(self._send_and_reply("C{}:WAVEFORM? ALL".format(OChan)), False)
    base = GLOBAL.PSFILEPATH + 'data/' + str(FileName)
    with open(base + '.csv', 'w', newline='') as f:
        csv.writer(f, delimiter='\n').writerow(parsed['DATA'])
    with open(base + '-h.csv', 'w', newline='') as f:
        header_writer = csv.DictWriter(f, parsed.keys())
        header_writer.writeheader()
        header_writer.writerow(parsed)
    return
def sch(self, OChan, FileName):
    """
    Wrapped function for saving voltage data from the specified channel;
    socket is automatically opened/closed.
    """
    return self._FunctionWrapper(self._sch, {'OChan': OChan, 'FileName': FileName})
def _schall(self,FileName):
"""
Bare function for saving voltage data from all channels of specified scope;
socket must be explicitly opened/closed
"""
for OChan in range(1,5):
self._sch(OChan,FileName+'_ch'+str(OChan))
return
def schall(self, FileName):
    """
    Wrapped function for saving voltage data from all channels of specified scope;
    socket is automatically opened/closed.
    """
    return self._FunctionWrapper(self._schall, {'FileName': FileName})
def _pch(self, OChan):
    """
    Bare function for plotting voltage data from the specified channel;
    socket must be explicitly opened/closed.
    """
    ep.l(self._rch(OChan))
    return
def pch(self, OChan):
    """
    Wrapped function for plotting voltage data from the specified channel;
    socket is automatically opened/closed.
    """
    return self._FunctionWrapper(self._pch, {'OChan': OChan})
def _pchxy(self, OChan):
    """
    Bare function for plotting voltage data and corresponding time data from the
    specified channel; socket must be explicitly opened/closed.
    """
    trace = self._rchxy(OChan)
    ep.lxy(*trace)
    return
def pchxy(self, OChan):
    """
    Wrapped function for plotting voltage data and corresponding time data from the
    specified channel; socket is automatically opened/closed.
    """
    return self._FunctionWrapper(self._pchxy, {'OChan': OChan})
def _pchall(self):
    """
    Bare function for plotting voltage data from all channels of specified scope;
    socket must be explicitly opened/closed.
    """
    ep.ll(self._rchall())
    return
def pchall(self):
    """
    Wrapped function for plotting voltage data from all channels of specified scope;
    socket is automatically opened/closed.
    """
    return self._FunctionWrapper(self._pchall)
def _pchallxy(self):
    """
    Bare function for plotting voltage data and corresponding time data from all
    channels of specified scope; socket must be explicitly opened/closed.
    """
    traces = self._rchallxy()
    ep.llxy(traces)
    return
def pchallxy(self):
    """
    Wrapped function for plotting voltage data and corresponding time data from all
    channels of specified scope; socket is automatically opened/closed.
    """
    return self._FunctionWrapper(self._pchallxy)
def _sumch(self,OChan):
"""
Bare function for summing all voltage values from the specified channel;
socket must be explicitly opened/closed
"""
templistQ=self._rch(OChan)
return np.sum(templistQ)
def sumch(self, OChan):
    """
    Wrapped function for summing all voltage values from the specified channel;
    socket is automatically opened/closed.
    """
    return self._FunctionWrapper(self._sumch, {'OChan': OChan})
def save_scope_to_eLog(self, chan_to_eLog=2):
    """
    Saves specified scope channel (data + voltage vs time plot) to the eLog
    Example: LOSC('1').save_scope_to_eLog(chan_to_eLog=2) saves channel 2 of LeCroy1
    to the current eLog
    :param chan_to_eLog: scope channel (1-4) whose .dat and .png files are attached;
                         data files for all four channels are always written to disk
    """
    ExpName = LPL.get_curr_exp()
    fpQ = str('/reg/neh/operator/mecopr/experiments/' + ExpName + '/lecroy_xray/')
    chan_to_eLog = int(chan_to_eLog)
    if chan_to_eLog not in [1, 2, 3, 4]:
        print('Channel must be 1, 2, 3, or 4! Using channel 2 as default.')
        chan_to_eLog = 2
    # BUG FIX: original tested/created fpQ[-13] -- a single character -- where the
    # parent directory fpQ[:-13] (fpQ minus the trailing 'lecroy_xray/') was intended
    parentQ = fpQ[:-13]
    if not os.path.exists(parentQ):
        print('File path ' + parentQ + ' does not exist! Trying to create it...')
        try:
            os.makedirs(parentQ)
            print('Folder created successfully!')
            os.chmod(parentQ, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
        except OSError:
            print('Failed to create ' + parentQ + '!')
    if not os.path.exists(fpQ):
        print('File path ' + fpQ + ' does not exist! Trying to create it...')
        try:
            os.makedirs(fpQ)
            print('Folder created successfully!')
            os.chmod(fpQ, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
        except OSError:
            print('Failed to create ' + fpQ + '!')
    try:
        mecel = elog.ELog({'experiment': ExpName}, user='mecopr',
                          pw=pickle.load(open(GLOBAL.PSFILEPATH + 'elogauth.p', 'rb')))
    except Exception:
        print('Failed to connect to eLog!')
    try:
        chdata = self.rchallxy()
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    except Exception:
        # BUG FIX: abort here; the original fell through and raised a NameError
        # on the unbound chdata/timestamp below
        print('Failed to read out data!')
        return
    for ii in range(4):
        np.savetxt(fpQ + 'lecroy1_ch' + str(ii + 1) + '_' + timestamp + '.dat', tuple(chdata[ii]))
    ep.lxysav(chdata[chan_to_eLog - 1][0], chdata[chan_to_eLog - 1][1],
              fpQ + 'lecroy1_ch' + str(chan_to_eLog) + '_' + timestamp + '.png', abs_path=True)
    fullmsg = str('Scope trace data for all 4 channels saved to ' + fpQ + ' with time stamp '
                  + timestamp + '. Attached are the data and plot files for channel ' + str(chan_to_eLog) + '.')
    try:
        mecel.post(fullmsg,
                   attachments=[fpQ + 'lecroy1_ch' + str(chan_to_eLog) + '_' + timestamp + '.dat',
                                fpQ + 'lecroy1_ch' + str(chan_to_eLog) + '_' + timestamp + '.png'],
                   tags=['scope_trace'])
        print('Auto-saved to eLog.')
    except Exception:
        print('Failed to auto-save to eLog!')
def _ctrl(self,msg,SendOnly=True):
"""
Once completed, this function will be used for sending specialized commands to the specified LeCroy; this can be found in the manual
Other old scribbles:
SetScopeParameters(kwargs) --> scan for different inputs like those listed below;
could also specify use case and let it set or RCL appropriately
(msg='TIME_DIV?',SendOnly=False)
(msg='*RCL 3',SendOnly=False)
(msg='TDIV 100E-9',SendOnly=True)
(msg='C1:VDIV 500E-3',SendOnly=True)
NOTE: ignore the backslash before the triple quotes below if you are reading the bare code... it is there to avoid closing the docstring
(msg=r\"""vbs 'app.acquisition.triggermode = "single" ' \""",SendOnly=True)
(msg=r\"""vbs 'app.acquisition.c1.deskew = 0 ' \""",SendOnly=True)
(msg=r\"""vbs 'app.acquisition.c2.AverageSweeps = 10 ' \""",SendOnly=True)
(msg=r\"""vbs 'app.acquisition.c2.EnhancedResType = 2 ' \""",SendOnly=True) #0:None, 1:0.5bits, 2: 1.0bits, ..., 6: 3.0bits
"""
self._send_and_reply(msg,SendOnly=SendOnly)
def RestoreConfig(self):
    """
    Recall the scope configuration saved in internal memory file #1
    (administratively, the memory file where the latest config is kept).
    Example: LOSC('A').RestoreConfig() resets LeCroyA's setup from internal file #1.
    Especially useful if scope settings were mistakenly changed and need to be
    restored to allow appropriate pulse shaping performance.
    Demonstrates using _ctrl through _FunctionWrapper (socket handled automatically).
    """
    return self._FunctionWrapper(self._ctrl, {'msg': '*RCL 1', 'SendOnly': False})
class EMeters:
    """
    Class containing readout functions for energy meters on all MEC laser systems
    Typically used via EMeters.[command]
    Possible commands include:
        :LPLInChamber #returns in-chamber energy meter read-outs
        :EG #returns 2in2w energy meter read-outs (scaled by calibration factors)
        :EG1wYFE1in #returns YFE and 1in1w energy meter read-outs (scaled by calibration factors)
        :EG1w2in #returns 2in1w energy meter read-outs (scaled by calibration factors)
        :EGall #returns a full energy report across YFE, 1w, and 2w meters
        :SPLEG #returns energy of SPL energy meter diagnostics
        :gentec_refresh #refreshes settings of LPL Gentec meters
        :E_coeff_refresh #refreshes energy coefficients
        :E_synth_refresh #refreshes synthesized energies written to notepad PVs
    See the docstrings for more guidance.
    Potential future work:
        : improve SPLEG accuracy currently affected by changing ambient conditions
    NOTE(review): methods below are deliberately defined without self/cls and are
    called through the class namespace, e.g. EMeters.EG().
    """
    def LPLInChamber(printDisplay=False):
        """
        Returns energy values for in-chamber LPL Gentec meters
        printDisplay=True means the values will be printed to terminal in addition to being returned in an array
        printDisplay=False means the values will not be printed to terminal while still being returned in an array
        """
        tvalW=GLOBAL.EGLPLWest.get();
        tvalE=GLOBAL.EGLPLEast.get();
        if printDisplay:
            print('Power meter: WEST: ' + str(tvalW) + ', EAST: ' + str(tvalE) + ', TOTAL: ' + str(tvalW+tvalE))
        return [tvalW, tvalE]
    def EG():
        """
        Gathers all the energy meter readouts for the 2in2w outputs and scales them appropriately
        Returns an array containing an array of the guessed values and an array of the in-chamber measured values
        """
        eab=GLOBAL.EGLPL2in2wAB.get()
        eef=GLOBAL.EGLPL2in2wEF.get()
        egh=GLOBAL.EGLPL2in2wGH.get()
        eij=GLOBAL.EGLPL2in2wIJ.get()
        # scale each raw readout by its calibration coefficient
        EAB=round(GLOBAL.Ecoeff2in2wAB.get()*eab,4)
        EEF=round(GLOBAL.Ecoeff2in2wEF.get()*eef,4)
        EGH=round(GLOBAL.Ecoeff2in2wGH.get()*egh,4)
        EIJ=round(GLOBAL.Ecoeff2in2wIJ.get()*eij,4)
        guessarray=[[EAB,EEF,EGH,EIJ],round(EAB+EEF,4),round(EGH+EIJ,4),round(EAB+EEF+EGH+EIJ,4)]
        tempglobarr=[GLOBAL.EAB2w,GLOBAL.EEF2w,GLOBAL.EGH2w,GLOBAL.EIJ2w]
        for ii in range(4):
            try:
                # zero out heads that are disabled per PFN.HeadENB()
                tempglobarr[ii].put(guessarray[0][ii]*PFN.HeadENB()[ii])
            except:
                print('Failed to update notepad PV!')
        try:
            EABEF=GLOBAL.EGLPLWest.get()
            EGHIJ=GLOBAL.EGLPLEast.get()
        except:
            EABEF=-1;EGHIJ=-1  # sentinel values when the chamber meters are unreadable
        realarray=[EABEF,EGHIJ,EABEF+EGHIJ]
        return [guessarray,realarray]
    def EG1wYFE1in():
        """
        Gathers the energy meter readouts for the YFE and 1in outputs and scales them appropriately
        Returns an array containing the guess for each
        """
        eyfe=GLOBAL.EGLPLYFE.get()
        e1in=GLOBAL.EGLPL1in1w.get()
        EYFE,E1IN=GLOBAL.EcoeffYFE.get()*eyfe,GLOBAL.Ecoeff1in1wCD.get()*e1in#was 0.3578
        guessarray=[round(EYFE,4),round(E1IN,4)]
        tempglobarr=[GLOBAL.EYFE,GLOBAL.ECD1w]
        for ii in range(2):
            try:
                tempglobarr[ii].put(guessarray[ii])
            except:
                print('Failed to update notepad PV!')
        return guessarray
    def EG1w2in():
        """
        Gathers all the energy meter readouts for the 2in1w outputs and scales them appropriately
        Returns an array containing the guessed values of each
        """
        eab=GLOBAL.EGLPL2in1wAB.get()
        eef=GLOBAL.EGLPL2in1wEF.get()
        egh=GLOBAL.EGLPL2in1wGH.get()
        eij=GLOBAL.EGLPL2in1wIJ.get()
        EAB=round(GLOBAL.Ecoeff2in1wAB.get()*eab,4)
        EEF=round(GLOBAL.Ecoeff2in1wEF.get()*eef,4)
        EGH=round(GLOBAL.Ecoeff2in1wGH.get()*egh,4)
        EIJ=round(GLOBAL.Ecoeff2in1wIJ.get()*eij,4)
        guessarray=[[EAB,EEF,EGH,EIJ],round(EAB+EEF,4),round(EGH+EIJ,4),round(EAB+EEF+EGH+EIJ,4)]
        tempglobarr=[GLOBAL.EAB1w,GLOBAL.EEF1w,GLOBAL.EGH1w,GLOBAL.EIJ1w]
        for ii in range(4):
            try:
                tempglobarr[ii].put(guessarray[0][ii]*PFN.HeadENB()[ii])
            except:
                print('Failed to update notepad PV!')
        return guessarray
    def EGall(return_txt=False,chamber_meter_in=False, return_energy_only=False):
        """
        Gathers all the energy meter outputs from EG1wYFE1in(), EG1w2in(), and EG()
        Use return_txt=True to return the energy report as a string
        Use return_txt=False to return the numerical energies in an array instead
        Use chamber_meter_in=True to add a line in the print-out about the in-chamber LPL energy read-outs
        Use chamber_meter_in=False to suppress a line in the print-out about the in-chamber LPL energy read-outs
        Use return_energy_only=True to return ONLY the numerical energies in an array -- no terminal printing, no message
        Use return_energy_only=False to allow for the terminal printing and the message return (if return_txt=True)
        """
        [en1wYFE, en1w1in] = EMeters.EG1wYFE1in()
        [en1wAB, en1wEF, en1wGH, en1wIJ] = EMeters.EG1w2in()[0]
        [en2wAB, en2wEF, en2wGH, en2wIJ] = EMeters.EG()[0][0]
        [enWEST, enEAST]=EMeters.EG()[1][:2]
        [cAB,cEF,cGH,cIJ]=PFN.HeadENB()  # enable flags: mask out disabled heads
        if return_energy_only:
            return [en1wYFE, en1w1in, cAB*en1wAB, cEF*en1wEF, cGH*en1wGH, cIJ*en1wIJ, cAB*en2wAB, cEF*en2wEF, cGH*en2wGH, cIJ*en2wIJ]
        ###
        wppvlist=[GLOBAL.HWPAB, GLOBAL.HWPEF, GLOBAL.HWPGH, GLOBAL.HWPIJ];
        headstr='';wpstr='';headlist=['AB','EF','GH','IJ'];
        for ii in range(4):
            wpstr=headstr+wpstr+headlist[ii]+': '+str(round(wppvlist[ii].get(),3))+', '
        # cos^2 transmission per half-wave-plate setting; 1e-12 avoids divide-by-zero later
        wpenlist=[1e-12 + PFN.HeadENB()[ii]*np.cos((np.pi/180)*2*wppvlist[ii].get())**2 for ii in range(4)]
        ###
        strlist=[]
        strlist.append('Date: '+ datetime.now().strftime('%A, %d. %B %Y %I:%M:%S%p'))
        print(strlist[-1]);strlist.append('\n');
        strlist.append('Time since last shot: '+GLOBAL.PFNSS.get())
        print(strlist[-1]);strlist.append('\n');
        Psns=LPL._Psns_get()#pickle.load(open(psfpQ+'Psns.p','rb'))
        SSs=LPL._SSs_get()#pickle.load(open(psfpQ+'SSs.p','rb'))
        strlist.append('Current pulse target is: '+str(list(Psns))+' ns, '+str([list(SSs_sub) for SSs_sub in SSs])+' % of max power.')
        print(strlist[-1]);strlist.append('\n');
        strlist.append('YFE: '+'{:5.1f}'.format(1000*round(en1wYFE,4))+'mJ, 1"@1w: '+'{:5.2f}'.format(en1w1in)+'J')
        print(strlist[-1]);strlist.append('\n');
        strlist.append('2"@1w: AB: '+'{:5.2f}'.format(round(cAB*en1wAB,4))+'J, EF: '+'{:5.2f}'.format(round(cEF*en1wEF,4))+'J, GH: '+'{:5.2f}'.format(round(cGH*en1wGH,4))+'J, IJ: '+'{:5.2f}'.format(round(cIJ*en1wIJ,4))+'J')
        print(strlist[-1]);strlist.append('\n');
        strlist.append('2"@2w: AB: '+'{:5.2f}'.format(round(cAB*en2wAB,4))+'J, EF: '+'{:5.2f}'.format(round(cEF*en2wEF,4))+'J, GH: '+'{:5.2f}'.format(round(cGH*en2wGH,4))+'J, IJ: '+'{:5.2f}'.format(round(cIJ*en2wIJ,4))+'J')
        print(strlist[-1]);strlist.append('\n');
        # only report conversion efficiency when all waveplates are near full transmission
        if np.sum(wpenlist) < 4:
            pass
        else:
            strlist.append('Conv%: AB: '+'{:5.2f}'.format(round(100*cAB*en2wAB/en1wAB,4))+'%, EF: '+'{:5.2f}'.format(round(100*cEF*en2wEF/en1wEF,4))+'%, GH: '+'{:5.2f}'.format(round(100*cGH*en2wGH/en1wGH,4))+'%, IJ: '+'{:5.2f}'.format(round(100*cIJ*en2wIJ/en1wIJ,4))+'%')
            print(strlist[-1]);strlist.append('\n');
        if chamber_meter_in:
            strlist.append('Measured energy: WEST: '+'{:5.2f}'.format(round(enWEST,4))+'J, EAST: '+'{:5.2f}'.format(round(enEAST,4))+'J')
            print(strlist[-1]);strlist.append('\n');
        strlist.append('Inferred energy: WEST: '+'{:5.2f}'.format(round(cAB*en2wAB+cEF*en2wEF,4))+'J, EAST: '+'{:5.2f}'.format(round(cGH*en2wGH+cIJ*en2wIJ,4))+'J, TOTAL: '+'{:5.2f}'.format(round(cAB*en2wAB+cEF*en2wEF+cGH*en2wGH+cIJ*en2wIJ,4)))
        print(strlist[-1]);strlist.append('\n');
        if return_txt:
            tot_msg='';
            for seg in strlist:
                tot_msg+=seg
            return tot_msg
        return [en1wYFE, en1w1in, cAB*en1wAB, cEF*en1wEF, cGH*en1wGH, cIJ*en1wIJ, cAB*en2wAB, cEF*en2wEF, cGH*en2wGH, cIJ*en2wIJ]
    def SPLEG():
        """
        returns the scaled energy meter read-outs of the shot-pulse laser
        energy array output in order of [regen, TOPAS, MPA1, MPA2]
        (currently not widely used due to sampling percentage variability due to changing ambient conditions)
        """
        #later add in GLOBALS.EREGEN .ETOPAS .EMPA1 .EMPA2
        reen=GLOBAL.EGSPLRE.get()
        toen=GLOBAL.EGSPLTO.get()
        m1en=GLOBAL.EGSPLM1.get()
        m2en=GLOBAL.EGSPLM2.get()
        # linear calibration: energy = slope * raw + offset
        regenen=GLOBAL.EcoeffRE1*reen + GLOBAL.EcoeffRE0
        topasen=GLOBAL.EcoeffTO1*toen + GLOBAL.EcoeffTO0
        mpa1en=GLOBAL.EcoeffM11*m1en + GLOBAL.EcoeffM10 #1.76e5, -2.22e0
        mpa2en=GLOBAL.EcoeffM21*m2en + GLOBAL.EcoeffM20
        return np.round([regenen,topasen,mpa1en,mpa2en],2)
    def gentec_refresh():
        """
        Shorthand way of resetting all the desired input parameters of several different Gentec meters (mostly for 2in beams)
        """
        pvhead='MEC:LAS:GENTEC:';pvtails=['DESCRIPTION','SET_WAVLEN','SET_SCALE','SET_TRIGMODE','SET_TRIGLVL','SET_ATTENUATOR'];#'SET_SCALE' was 'SCALE'
        pvids=['02:CH1:','02:CH2:','01:CH1:','01:CH2:','03:CH1:','03:CH2:','04:CH1:','04:CH2:'];
        # per-meter values ordered to match pvtails: [description, wavelength, scale, trigmode, triglvl, attenuator]
        abirvals=['AB IRsamp',1053,24,1,2,0]#24 was '1' with 'SCALE'
        efirvals=['EF IRsamp',1053,24,1,2,0]
        ghirvals=['GH IRsamp',1053,24,1,2,0]
        ijirvals=['IJ IRsamp',1053,24,1,2,0]
        ab2wvals=['AB 2wsamp',527,23,1,2,0]#23 was '300m' with 'SCALE'
        ef2wvals=['EF 2wsamp',527,23,1,2,0]
        gh2wvals=['GH 2wsamp',527,23,1,2,0]
        ij2wvals=['IJ 2wsamp',527,23,1,2,0]
        pvwest=['MEC:GENTEC:01:CH2:DESCRIPTION','MEC:GENTEC:01:CH2:SET_WAVLEN','MEC:GENTEC:01:CH2:SET_SCALE','MEC:GENTEC:01:CH2:SET_TRIGMODE','MEC:GENTEC:01:CH2:SET_TRIGLVL','MEC:GENTEC:01:CH2:SET_ATTENUATOR']
        westvals=['West ABEF',527,28,1,2,1]#28 was '100' with 'SCALE'
        pveast=['MEC:GENTEC:01:CH1:DESCRIPTION','MEC:GENTEC:01:CH1:SET_WAVLEN','MEC:GENTEC:01:CH1:SET_SCALE','MEC:GENTEC:01:CH1:SET_TRIGMODE','MEC:GENTEC:01:CH1:SET_TRIGLVL','MEC:GENTEC:01:CH1:SET_ATTENUATOR']
        eastvals=['East GHIJ, short pulse',527,28,1,2,1]#28 was '100' with 'SCALE'
        pvgroups = [[pvhead+pvid+pvtail for pvtail in pvtails] for pvid in pvids];pvgroups.append(pvwest);pvgroups.append(pveast);
        valgroups = [abirvals,efirvals,ghirvals,ijirvals,ab2wvals,ef2wvals,gh2wvals,ij2wvals,westvals,eastvals]
        for pvgroup,valgroup in zip(pvgroups,valgroups):
            for pvx,valx in zip(pvgroup,valgroup):
                temppv=EpicsSignal(pvx)
                temppv.put(valx)
    def E_coeff_refresh():
        """Resets E_coefficients in notepad PVs based on coefficients contained in GLOBAL"""
        GLOBAL.notepadPVreset()
        #pvlist=['MEC:LAS:FLOAT:'+str(ii) for ii in range(31,41)];
        #inddesclist=['YFE','CD1w','AB1w','EF1w','GH1w','IJ1w','AB2w','EF2w','GH2w','IJ2w']
        #desclist=['E_coeff_'+inddesc for inddesc in inddesclist]
        #valulist=[.3578,0.5971,224.0,177.5,307.4*0.849,113.2,111.0*1.17,187.9*0.860,182.1*0.897,123.5*1.25]
        #for jj in range(len(pvlist)):
        #    temppv1=EpicsSignal(str(pvlist[jj]+'.DESC'));temppv2=EpicsSignal(pvlist[jj]);
        #    temppv1.put(desclist[jj]);temppv2.put(valulist[jj]);
    def E_synth_refresh():
        """Updates the synthetic energy guesses (based on energy readouts) stored in notepad PVs"""
        pvlist=['MEC:LAS:FLOAT:'+str(ii).zfill(2) for ii in range(1,11)];
        inddesclist=['YFE','CD1w','AB1w','EF1w','GH1w','IJ1w','AB2w','EF2w','GH2w','IJ2w']
        desclist=['E_synth_'+inddesc for inddesc in inddesclist]
        #add in again for SPL meters once coefficient is stable
        #pvlist2=['MEC:LAS:FLOAT:'+str(ii).zfill(2) for ii in range(21,25)];
        #inddesclist2=['REGEN','TOPAS','MPA1','MPA2']
        #desclist2=['E_synth_'+inddesc2 for inddesc2 in inddesclist2]
        eyfe=GLOBAL.EGLPLYFE.get();
        e1in=GLOBAL.EGLPL1in1w.get();
        eab1w=GLOBAL.EGLPL2in1wAB.get();
        eef1w=GLOBAL.EGLPL2in1wEF.get();
        egh1w=GLOBAL.EGLPL2in1wGH.get();
        eij1w=GLOBAL.EGLPL2in1wIJ.get();
        eab2w=GLOBAL.EGLPL2in2wAB.get();
        eef2w=GLOBAL.EGLPL2in2wEF.get();
        egh2w=GLOBAL.EGLPL2in2wGH.get();
        eij2w=GLOBAL.EGLPL2in2wIJ.get();
        energyarr=np.array([eyfe,e1in,eab1w,eef1w,egh1w,eij1w,eab2w,eef2w,egh2w,eij2w])
        # coefficient notepad PVs live at MEC:LAS:FLOAT:31..40, matching energyarr order
        coefflist=[]
        for ii in range(31,41):
            temppv=EpicsSignal('MEC:LAS:FLOAT:'+str(ii));
            coefflist.append(temppv.get());
        valulist=energyarr*np.array(coefflist)
        for jj in range(len(pvlist)):
            temppv1=EpicsSignal(str(pvlist[jj]+'.DESC'));temppv2=EpicsSignal(pvlist[jj]);
            temppv1.put(desclist[jj]);temppv2.put(valulist[jj]);
        #for jj in range(len(pvlist2)):
        #    temppv1=EpicsSignal(str(pvlist2[jj]+'.DESC'));temppv2=EpicsSignal(pvlist2[jj]);
        #    temppv1.put(desclist2[jj]);temppv2.put(valulist2[jj]);
        #Add in SPL meters later'
        return
class MBC:
    """
    Class for controlling different functions of the MBC bias control box used with the LPL front-end seed laser
    These functions are meant mostly for laser experts.
    Usage can simply proceed via MBC.[command]
    Potential commands include:
        :ModeCheck()
        :IsSafe()
        :Reset()
    Check docstrings of individual functions for more details
    Potential future improvements:
        - dither parameter control (if possible -- would need expansion of IOC capability)
    NOTE(review): methods are defined without self/cls and called via the class
    namespace, e.g. MBC.Reset().
    """
    def ModeCheck():
        """Returns the current mode of the MBC, either 0 (AUTO) or 1 (MANUAL)"""
        return GLOBAL.MBCmode.get()
    def IsSafe():#re-write checks as individual functions
        """
        Verifies that MBC is operating safely -- powered on, in AUTO/MIN mode, not out of voltage range, no fault detected
        Returns True if all systems nominal, returns False if something was amiss
        """
        status = True
        print('Checking MBC status...')
        # status*=False drives the flag to 0 (falsy) on any failed check
        if GLOBAL.MBCpwr.get() != 1:
            print('MBC is not on!!')
            status*=False
        if GLOBAL.MBCmode.get() != 0:
            print('MBC is not in AUTO mode!!')
            status*=False
        if GLOBAL.MBCsetpt.get() != 1:
            print('MBC is not in MIN mode!!')
            status*=False
        if not -7000<GLOBAL.MBCbias.get()<7000:
            print('MBC is out of range!')
            status*=False
        if GLOBAL.MBCfault.get() != 0:
            print('MBC fault detected!')
            status*=False
        if status:
            # sample the bias three times (~4 s) and require it to be steady
            biaschk=[]
            print('Checking MBC bias level...',end='',flush=True)
            for ii in range(3):
                biaschk.append(GLOBAL.MBCbias.get())
                time.sleep(1);print('..',end='',flush=True);time.sleep(1);print('..',end='',flush=True);
            print('*')
            if np.max(np.abs(np.diff(biaschk))) > 5:
                print('MBC bias level unstable!')
                return False
            else:
                return True
        else:
            return False
    def Reset():
        """
        Resets the bias control box to make sure it is working properly
        Powers on / clears faults / restores AUTO+MIN modes as needed, then waits
        for the bias voltage to stabilize; power-cycles and recurses if the bias
        is out of range or fails to settle.
        """
        YFE.SetAll(False);
        #add KeyboardInterrupt?
        print('Begin resetting the MBC...')
        if GLOBAL.MBCpwr.get() != 1:
            print('Powering on MBC, starting scan...',end='',flush=True)
            GLOBAL.MBCpwr.put(1);time.sleep(1);print('.',end='',flush=True);GLOBAL.MBCmode.put(0);
            efc.dotsleep(8);
        if GLOBAL.MBCfault.get() != 0:
            print('Attempting to reset MBC fault...',end='',flush=True)
            GLOBAL.MBCfault.put(1)
            time.sleep(2);print('*');
        if GLOBAL.MBCmode.get() != 0:
            print('Setting MBC to AUTO mode, starting scan...',end='',flush=True)
            GLOBAL.MBCmode.put(0)
            efc.dotsleep(8);
        if GLOBAL.MBCsetpt.get() != 1:
            print('Setting MBC to MIN mode,starting scan...',end='',flush=True)
            GLOBAL.MBCsetpt.put(1)
            time.sleep(2);print('*');
        inibias=GLOBAL.MBCbias.get()
        if not -7000<inibias<7000:
            print('MBC is out of range! Aborting and power-cycling...');
            # push the bias toward the opposite rail (pseudo-randomly 0 or +/-9000), then power-cycle
            GLOBAL.MBCbias.put((np.round(time.time()*1000)%2)*9000*np.sign(inibias));time.sleep(1);
            GLOBAL.MBCpwr.put(2);time.sleep(2);
            MBC.Reset();return
        biaschk=[]
        print('Checking the initial MBC bias level...',end='',flush=True)
        for ii in range(3):
            biaschk.append(GLOBAL.MBCbias.get())
            time.sleep(1);print('..',end='',flush=True);time.sleep(1);print('..',end='',flush=True);
        print('*')
        waitloop=True;loopcnt=0;
        biaschklog=[]
        biaschklog.append(np.sum(np.abs(np.diff(biaschk))))
        while waitloop:
            newchk=np.abs(np.diff(biaschk))
            biaschklog.append(np.sum(newchk))
            if np.sum(newchk) > 3:
                # still drifting: resample three more readings and keep waiting
                print('MBC bias level unstable... '+str(biaschk),end='',flush=True)
                biaschk=[]
                for ii in range(3):
                    biaschk.append(GLOBAL.MBCbias.get())
                    time.sleep(1);print('..',end='',flush=True);time.sleep(1);print('..',end='',flush=True);
                print('')
                loopcnt+=1
                # give up if drift is worsening after 10 tries, or after 20 tries regardless
                if ((loopcnt >= 10) and (biaschklog[-1] > biaschklog[-2] + 1)) or (loopcnt >= 20):
                    print('MBC bias level stability fail. Aborting and power-cycling...')
                    GLOBAL.MBCbias.put((np.round(time.time()*1000)%2)*9000*np.sign(biaschk[-1]));time.sleep(1);
                    GLOBAL.MBCpwr.put(2);time.sleep(2);
                    MBC.Reset();return
            else:
                print('MBC bias level stabilized... '+str(biaschk))
                waitloop = False
        return
class YFE:
    """
    Class for organizing functions associated with the YLF Front End (YFE) laser system
    Usage can simply proceed via YFE.[command]
    Potential commands include:
        :OnCheck() #checks whether YFE is on or off
        :On() #initiates turn-on sequence
        :Off() #initiates shut-off sequence
        :Get() #retrieves eDrive current setpoints and RBVs
        :Set(mmQ,currQ) #changes current setpoint corresponding to certain rod diameters
        :SetAll(IOBool) #turns on or off all eDrive currents without turning off emission
        :Trace() #plots oscilloscope trace of YFE output
    Check docstrings of individual functions for more details
    """
    def OnCheck(display=True):
        """
        Checks whether YFE is turned on or off
        Returns False if turned off; returns True if turned on
        Use display=True to print current YFE status to terminal
        Use display=False to avoid printing current YFE status to terminal
        """
        YFEadd='MEC:LPL:LCO:0'#common PV prefix for the six eDrive channels
        YFEamp=['2','3','5','6','1','4']#channel order: four 2mm heads, then the 6mm and 10mm heads (see Set())
        YFEsuf=[':SensedCurrent',':ActiveCurrent',':PowerSupply',':Temperature',':Emission_RBV',':Emission',':FaultState.RVAL',':ClearFault']
        statuslist=[]
        for ii in range(len(YFEamp)):
            temprbvemispv=EpicsSignal(YFEadd+YFEamp[ii]+YFEsuf[4])
            statuslist.append(temprbvemispv.get())#check emission
        # all six emission RBVs off -> off; all six on -> on; anything in between is a partial state
        if np.sum(statuslist)==0:
            YFEonbool=False
        if 0<np.sum(statuslist)<6:
            print('Warning: YFE seems to be partially on/off.')
            YFEonbool=False
        if np.sum(statuslist)==6:
            YFEonbool=True
        if display:
            print('Current status: '+str(statuslist))
        return YFEonbool
    @classmethod
    def On(cls,CtrlChk=True):
        """
        Initiates turn-on procedure for YFE laser system, including several preliminary equipment checks; may take a minute or so
        Returns True on success, False on failure (returns None early if emission was already enabled)
        Use CtrlChk=True to check the LPL control system (pertinent hosts, IOCs, PVs, etc.) as part of the turn-on procedure
        Use CtrlChk=False to avoid checking the LPL control system as part of the turn-on procedure
        """
        if YFE.OnCheck(display=False):
            print('YFE emission already enabled.');return
        # make sure support equipment is powered (1 appears to be the 'on' state; verified again at the end)
        if GLOBAL.LPLPCpwr.get() != 1:
            GLOBAL.LPLPCpwr.put(1)
        if GLOBAL.LPLVACpwr.get() != 1:
            GLOBAL.LPLVACpwr.put(1)
        if GLOBAL.LPLPS1pwr.get() != 1:
            GLOBAL.LPLPS1pwr.put(1)
        YFEadd='MEC:LPL:LCO:0'
        YFEamp=['2','3','5','6','1','4']
        YFEsuf=[':SensedCurrent',':ActiveCurrent',':PowerSupply',':Temperature',':Emission_RBV',':Emission',':FaultState.RVAL',':ClearFault']
        print('Turning on YFE! Checking for faults...')
        faultpvlist=[EpicsSignal(YFEadd+amplabel+YFEsuf[6], write_pv=YFEadd+amplabel+YFEsuf[7]) for amplabel in YFEamp]
        emisspvlist=[EpicsSignal(YFEadd+amplabel+YFEsuf[4]) for amplabel in YFEamp]
        faultstatlist=[faultpv.get() for faultpv in faultpvlist]
        if any(faultstatlist):
            print('YFE fault state detected! Trying to reset...')
            for faultpv in faultpvlist:
                faultpv.put(1)#writes go to :ClearFault via write_pv
            time.sleep(5)#3 sec seems to be too short sometimes for it to clear
            faultstatlist=[faultpv.get() for faultpv in faultpvlist]
            if any(faultstatlist):
                print('YFE fault state still detected, turn-on failed.')
                return False
            else:
                print('Fault cleared!')
        if CtrlChk == True:
            print('Checking LPL controls status (~10sec)!')
            PVsuccess = CtrlSys.pv_checker(pv='lpl')
            if not PVsuccess:
                print('Control error detected! Continue with system turn-on? [y/n]')
                checkprompt = efc.getch_with_TO(TOsec=10,display=False)
                if checkprompt not in ('y','Y'):
                    print('Try again later then!');
                    return False
                else:
                    print('OK, I hope you know what you\'re doing!')
        if not MBC.IsSafe():
            # MBC unhealthy: zero currents, reset the MBC, then re-enter On() once
            # (skipping the controls check the second time around)
            cls.SetAll(False,displayQ=False)
            print('MBC not configured properly!')
            MBC.Reset()
            cls.On(CtrlChk=False);
        else: #later add check to avoid over-energizing by reading power meter
            for ii in range(len(YFEamp)):
                tempsetcurrpv=EpicsSignal(YFEadd+YFEamp[ii]+YFEsuf[1])
                tempsetemispv=EpicsSignal(YFEadd+YFEamp[ii]+YFEsuf[5])
                tempsetcurrpv.put(0)#set current to 0
                tempsetemispv.put(1)#turn on emission
            print('Initializing eDrives...',end='',flush=True)
            efc.dotsleep(10);
            emissstatlist=[emisspv.get() for emisspv in emisspvlist]
            if all(emissstatlist):
                print('Ramping up currents...')
                cls.SetAll(True);
                print('YFE LASER ON')
            else:
                print('Turn on sequence failed. Check emission!')
                cls.Off();
                return False
        # final verification that support equipment actually powered up
        if GLOBAL.LPLPCpwr.get() != 1:
            print('Failed to turn on Pockels cell!')
        if GLOBAL.LPLVACpwr.get() != 1:
            print('Failed to turn on scroll pump!')
        if GLOBAL.LPLPS1pwr.get() != 1:
            print('Failed to turn on YFE PS1!')
        return True
    def Off():
        """
        Initiates turn-off procedure for YFE laser system
        Zeroes every eDrive current and disables emission, then powers down the
        Pockels cell, scroll pump, PS1 (value 2 is used as the 'off' state
        throughout this class), the glass chillers (via PFN mode), and the MBC.
        """
        GLOBAL.LPLPCpwr.put(2)
        GLOBAL.LPLVACpwr.put(2)
        GLOBAL.LPLPS1pwr.put(2)
        YFEadd='MEC:LPL:LCO:0'
        YFEamp=['2','3','5','6','1','4']
        YFEsuf=[':SensedCurrent',':ActiveCurrent',':PowerSupply',':Temperature',':Emission_RBV',':Emission',':FaultState.RVAL']
        for ii in range(len(YFEamp)):
            tempsetcurrpv=EpicsSignal(YFEadd+YFEamp[ii]+YFEsuf[1])
            tempsetemispv=EpicsSignal(YFEadd+YFEamp[ii]+YFEsuf[5])
            tempsetcurrpv.put(0)#set current to 0
            tempsetemispv.put(0)#turn off emission (comment fixed: the code always wrote 0 here)
        GLOBAL.PFNmode.put(0)#makes sure glass chillers turn off when YFEoff... people have been leaving them on
        GLOBAL.MBCpwr.put(2)
        print('YFE LASER OFF')
        return
    def Get(display=True):
        """
        Retrieves eDrive current setpoints and RBVs for all six diode-pumped heads within the YFE
        Returns the list of requested (setpoint) currents, rounded to 4 places
        Use display=True to print the requested and actual eDrive currents to terminal
        Use display=False to avoid printing the requested and actual eDrive currents to terminal
        """
        YFEadd='MEC:LPL:LCO:0'
        YFEamp=['2','3','5','6','1','4']
        YFEsuf=[':SensedCurrent',':ActiveCurrent',':PowerSupply',':Temperature',':Emission_RBV',':Emission',':FaultState.RVAL']
        currreqQ=[]
        curractQ=[]
        for ii in range(len(YFEamp)):
            tempsetcurrpv=EpicsSignal(YFEadd+YFEamp[ii]+YFEsuf[1])
            tempactcurrpv=EpicsSignal(YFEadd+YFEamp[ii]+YFEsuf[0])
            currreqQ.append(round(tempsetcurrpv.get(),4))
            curractQ.append(round(tempactcurrpv.get(),4))
        if display:
            print('Requested currents: '+str(currreqQ))
            print('Actual currents:    '+str(curractQ))
        return currreqQ
    def Set(mmQ,currQ,display=True):
        """
        Changes current setpoint corresponding to certain rod diameters mmQ
        Choices for rod diameters mmQ are 2, 6, or 10
        Current limits for currQ are 88, 135, and 140, respectively
        Nominal values for currQ are 85, 130, and 124, respectively
        Use display=True to print message to terminal of what values were changed
        Use display=False to avoid writing such a message to terminal
        Example: Set(10,100) changes the 10mm head to 100A
        """
        YFEadd='MEC:LPL:LCO:0'
        YFEamp=['2','3','5','6','1','4']
        YFEsuf=[':SensedCurrent',':ActiveCurrent',':PowerSupply',':Temperature',':Emission_RBV',':Emission',':FaultState.RVAL']
        changelist=[]
        if mmQ==2:
            changelist=range(4)#all four 2mm heads are set together
            tempoldcurr02pv=EpicsSignal(YFEadd+YFEamp[changelist[0]]+YFEsuf[1])
            oldcurrQ=str(tempoldcurr02pv.get())
            if currQ>88:
                print('Too high!')
                return
        elif mmQ==6:
            changelist=[4]
            tempoldcurr06pv=EpicsSignal(YFEadd+YFEamp[changelist[0]]+YFEsuf[1])
            oldcurrQ=str(tempoldcurr06pv.get())
            if currQ>135:
                print('Too high!')
                return
        elif mmQ==10:
            changelist=[5]
            tempoldcurr10pv=EpicsSignal(YFEadd+YFEamp[changelist[0]]+YFEsuf[1])
            oldcurrQ=str(tempoldcurr10pv.get())
            if currQ>140:
                print('Too high!')
                return
        else:
            print('No such head!')
            return
        currmeapv=EpicsSignal(YFEadd+YFEamp[changelist[0]]+YFEsuf[0])
        currmeaQ=currmeapv.get()
        for amphead in changelist:
            tempnewcurrpv=EpicsSignal(YFEadd+YFEamp[amphead]+YFEsuf[1])
            if currQ-currmeaQ>20:
                # large increase: approach the setpoint in ~20A increments, 1s apart
                nostep=int((currQ-currmeaQ)/20.0)
                for ii in range(nostep):
                    tempnewcurrpv.put(currQ-int(((1.0*nostep-ii)/(nostep+1.0))*(currQ-currmeaQ)))
                    time.sleep(1)
            tempnewcurrpv.put(currQ)#final setpoint (written directly for small changes)
        if display:
            print(str(mmQ)+' mm changed from ' + oldcurrQ + ' to ' + str(currQ))
        return
    @classmethod
    def SetAll(cls,IOBool,displayQ=False):
        """
        Shorthand way of adjusting all eDrive currents to nominal setpoints all together
        Use IOBool=True to turn all eDrives up to their nominal active setpoint
            :(reminder: 2mm --> 85A, 6mm --> 130A, 10mm --> 124A)
        Use IOBool=False to turn all eDrives down to zero without turning off the emission
            :(very convenient when resetting the MBC, for example; takes much less time than turning off and on eDrive emission)
        Use displayQ=True to print out current setpoint and RBV after pushing changes to setpoints
        Use displayQ=False to avoid printing out any such message to terminal
        """
        YFEadd='MEC:LPL:LCO:0'
        YFEamp=['2','3','5','6','1','4']
        YFEsuf=[':SensedCurrent',':ActiveCurrent',':PowerSupply',':Temperature',':Emission_RBV',':Emission',':FaultState.RVAL']
        YFElvl=[85,85,85,85,130,124];#nominal currents, matching the YFEamp channel order
        temppvlist=[]
        for ii in range(6):
            temppvlist.append(EpicsSignal(YFEadd+YFEamp[ii]+YFEsuf[1]))
        if IOBool:
            # ramp up in five equal steps, one second apart
            print('Ramping currents...',end='',flush=True)
            for jj in range(5):
                for eachpv,pvlvl in zip(temppvlist,YFElvl):
                    eachpv.put(pvlvl*(jj+1)/(5.0));
                time.sleep(1);print('..',end='',flush=True);
            print('*')
        else:
            # zeroing is immediate; no ramp on the way down
            print('Zeroing currents...',end='',flush=True);
            for eachpv in temppvlist:
                eachpv.put(0)
            time.sleep(1.5);print('*');
        cls.Get(display=displayQ);
        return
    def Trace():
        """
        Plots the voltages of the diode trace for the YFE output waveform
        (plots channel 1 of LeCroy scope 'a'); returns False on failure
        """
        try:
            LOSC('a').pch(1)
        except:#NOTE(review): bare except also swallows KeyboardInterrupt; consider narrowing
            print('Failed to display trace')
            return False
class PFN:
    """
    Utilities for the pulse-forming networks (PFNs) that charge the MEC Nd:glass LPL amplifier heads.
    Typical usage is as follows: PFN.[command]
    Possible commands include:
        :HeadENB #reads back which PFNs are currently enabled
        :EnableOnly #enables PFNs of specified arms and disables PFNs of unmentioned arms
        :ArmOnly #same as EnableOnly but also allows specification of the HWP (throttle) on the enabled arms
    """
    def HeadENB():
        """
        Report the charge-enable state of the four head pairs as a length-4 array:
        [ABstate, EFstate, GHstate, IJstate]
        An entry is 1 only when BOTH heads of the pair are enabled, 0 otherwise.
        Example: returns [1,1,1,1] when AB, EF, GH, and IJ PFNs are all enabled
        Example: returns [0,0,1,1] when only GH and IJ (east side) PFNs are enabled
        """
        head_pairs=[(GLOBAL.PFNAEN,GLOBAL.PFNBEN),
                    (GLOBAL.PFNEEN,GLOBAL.PFNFEN),
                    (GLOBAL.PFNGEN,GLOBAL.PFNHEN),
                    (GLOBAL.PFNIEN,GLOBAL.PFNJEN)]
        # product of the two enables is 1 only when both heads of the pair are on
        return np.array([first.get()*second.get() for first,second in head_pairs])
    def EnableOnly(ArmStrQ):
        """
        Enable exactly the heads listed and disable every head that is not listed.
        Example: EnableOnly('all') enables AB, EF, GH, and IJ PFNs
        Example: EnableOnly('ABIJ') enables AB and IJ PFNs and disables EF and GH PFNs
        """
        all_heads='ABEFGHIJ'
        if ArmStrQ.lower() == 'all':
            ArmStrQ = all_heads
        ArmStrQ = ArmStrQ.upper()
        GLOBAL.PFNmode.put(0)#drop to idle mode before touching the per-head enables
        time.sleep(2)
        for chan,head in enumerate(all_heads,start=1):
            head_pv=EpicsSignal('MEC:PFN:CH'+str(chan)+':ENABLE')
            head_pv.put(1 if head in ArmStrQ else 0)
        # step the PFN mode back up (0 -> 1 -> 2) with the same settle times as before
        time.sleep(2);GLOBAL.PFNmode.put(1);time.sleep(3.5);GLOBAL.PFNmode.put(2);
        return
    @classmethod
    def ArmOnly(cls,ArmStrQ,set_T=1):
        """
        Enable only the heads listed AND set those arms' waveplates to transmission set_T;
        heads that are not listed are disabled (their HWP values are left untouched).
        Example: ArmOnly('all') enables all PFNs and sets all HWPs for full transmission
        Example: ArmOnly('EFGH',set_T=0.5) enables EF and GH PFNs, disables AB and IJ PFNs,
                 and sets EF and GH HWPs for 50% transmission (AB and IJ HWPs are not touched)
        """
        HWP.On(ArmStrQ,set_T=set_T)
        cls.EnableOnly(ArmStrQ)
        return
class HWP:
    """
    Class for controlling the HWPs of the MEC Nd:glass LPL
    Typical usage is as follows: HWP.[command]
    Possible commands include:
        :On #sets the desired transmission level of the specified arms
        :Status #prints current HWP settings and returns an array of anticipated transmission levels
        :ClearStart #attempts to clear motor restart for AB and EF HWPs
        :HWP_opt #attempts to calibrate the HWP settings such that 0deg corresponds to full transmission
    """
    def On(ArmStrQ,set_T=1):
        """
        Adjusts the waveplates of the specified arms in order to provide the specified transmission
        Example: On('all') sets the transmission for AB, EF, GH, and IJ to 100% (default for set_T is 1)
        Example: On('ABIJ',set_T=0.75) sets the transmission for AB and IJ to 75%
        Retries a stuck waveplate up to 3 times before reporting failure.
        """
        armlist=['AB','EF','GH','IJ'];
        if (set_T<0) or (set_T>1):
            set_T=1;print('Error: set_T must be between 0 and 1! Using set_T=1 instead...')
        if ArmStrQ.lower() == 'all':
            ArmStrQ = 'ABEFGHIJ'
        HWPpvlist=[GLOBAL.HWPAB,GLOBAL.HWPEF,GLOBAL.HWPGH,GLOBAL.HWPIJ];
        # T = cos^2(2*theta) (see Status()), so theta = 0.5*acos(sqrt(T)) in degrees
        motor_angle=np.arccos(np.sqrt(set_T))*(180/np.pi)*0.5
        print('Moving waveplates '+ArmStrQ+' to '+str(round(motor_angle,4))+'deg...');
        ABchk=('A' in ArmStrQ) or ('a' in ArmStrQ) or ('B' in ArmStrQ) or ('b' in ArmStrQ);
        EFchk=('E' in ArmStrQ) or ('e' in ArmStrQ) or ('F' in ArmStrQ) or ('f' in ArmStrQ);
        GHchk=('G' in ArmStrQ) or ('g' in ArmStrQ) or ('H' in ArmStrQ) or ('h' in ArmStrQ);
        IJchk=('I' in ArmStrQ) or ('i' in ArmStrQ) or ('J' in ArmStrQ) or ('j' in ArmStrQ);
        chklist=[ABchk,EFchk,GHchk,IJchk];
        HWPoldrbvlist=[GLOBAL.HWPAB.get(),GLOBAL.HWPEF.get(),GLOBAL.HWPGH.get(),GLOBAL.HWPIJ.get()];
        for ii in range(4):
            if chklist[ii]:
                HWPpvlist[ii].put(motor_angle)
        time.sleep(2)
        # an arm is flagged as stuck when its RBV neither moved from the old value
        # nor reached the requested angle
        for ii in range(4):
            chklist[ii] = chklist[ii] and ((np.abs(HWPoldrbvlist[ii]-HWPpvlist[ii].get()) < 0.01) and (np.abs(motor_angle-HWPpvlist[ii].get()) > 0.01));
        retryii=0;
        while any(chklist):
            retryii+=1;
            for ii in range(4):
                if chklist[ii]:
                    HWPpvlist[ii].put(motor_angle);print('HWP issue detected on '+armlist[ii]+'. Re-trying...');
                    time.sleep(2)
                    chklist[ii] = chklist[ii] and ((np.abs(HWPoldrbvlist[ii]-HWPpvlist[ii].get()) < 0.01) and (np.abs(motor_angle-HWPpvlist[ii].get()) > 0.01)) and (retryii<3);
            if any(chklist):
                for ii in range(4):
                    if chklist[ii]:
                        print('Re-try on '+armlist[ii]+' failed!')
        return
    def Status():
        """
        prints current HWP settings to terminal and returns an array of anticipated transmission levels
        Example: if all HWPs are set to 0deg, returned array has form [1,1,1,1] showing 100% transmission on all arms
        Example: if all AB and IJ HWPs are set to 22.5deg, and EF and GH HWPs are set to 45deg,
                 returned array has form [0.5,0,0,0.5] showing AB and IJ HWPs are set for 50% transmission,
                 and EF and GH are set for 0% transmission
        (Both examples will also include a print-out to terminal showing current angle setting and corresponding transmission)
        """
        armlist=['AB','EF','GH','IJ'];
        HWPpvlist=[GLOBAL.HWPAB,GLOBAL.HWPEF,GLOBAL.HWPGH,GLOBAL.HWPIJ];
        # T = cos^2(2*theta); inverse of the angle formula used in On()
        set_Tlist=[round((np.cos(2*motor_angleRBV.get()*(np.pi/180)))**2,4) for motor_angleRBV in HWPpvlist]
        print('Current waveplate settings:');
        for ii in range(4):
            print(armlist[ii]+': '+str(round(HWPpvlist[ii].get(),1))+'deg --> T~'+str(round(100*set_Tlist[ii],1))+'%')
        return set_Tlist
    def ClearStart():
        """
        attempts to clear motor restart for AB and EF HWPs (running on MFORCE chassis)
        (no equivalent function for GH and IJ waveplates yet (running on Newport crate))
        """
        resetPVs=[GLOBAL.HWPABclr, GLOBAL.HWPEFclr]#no equivalent for GH and IJ yet...
        try:
            for eapv in resetPVs:
                eapv.put(1)
        except:
            print('Failed!')
    def HWP_opt(armsQ='ABEFGHIJ'):#check for trace height;#All shutters must start in the open state...
        """
        Like SHG_opt() but instead optimizes the HWP angles to set maximum transmission at 0deg
        (HWP_opt is really only useful if someone manually rotates the knob inside the enclosure, so usage is very infrequent)
        Prompts included to help insure system is in appropriate state for optimization
        Entire function takes several minutes (longer than SHG_opt); live display allows you to monitor progress
        If new value is too close to edge of window, it is recommended to optimize that arm again
        (If the angles are way far off (no detection), tune the Newport motor back until it is closer)
        If leaving armsQ blank: all arms are optimized in the order of AB, EF, GH, IJ
            :EXAMPLE: HWP_opt() is equivalent to HWP_opt('ABEFGHIJ') and optimizes all arms
        Instead, one may choose specific arms to optimize
            :EXAMPLE: HWP_opt(armsQ='ABIJ') or HWP_opt('ABIJ') optimizes only arms AB and IJ
            :EXAMPLE: HWP_opt(armsQ='EF') or HWP_opt('EF') optimizes only arm EF
        """
        print('Running this routine requires ALL TTL shutters to begin in the open state! The YFE must be on with the bias dither initially enabled!')
        # --- pre-flight sanity checks (warnings only; user confirms below) ---
        if np.sum(TTL_shutter.Status(display=False)[-1]) > 0:
            print('Warning! The shutters don\'t all appear to be open! ',end='',flush=True);TTL_shutter.Status(display=True);
        else:
            print('(Shutters seem OK...)')
        if not YFE.OnCheck(display=False):
            print('Warning! The YFE doesn\'t appear to be on! ',end='',flush=True);YFE.OnCheck(display=True);
        else:
            print('(YFE emission seems OK...)')
        if np.sum(YFE.Get(display=False)) < 550:
            print('Warning! The YFE doesn\'t appear to be turned up! ');YFE.Get(display=True);
        else:
            print('(YFE current seems OK...)')
        if MBC.ModeCheck() != 0:
            print('(Warning! The MBC doesn\'t appear to be in AUTO mode!')
        else:
            print('(MBC mode seems OK...)')
        print('Are you sure you are ready to proceed? [enter y/n]',end='',flush=True)
        checkprompt=efc.getch_with_TO(TOsec=10,display=False);
        if checkprompt not in ('y','Y'):
            print('Try again later then!');
            return
        else:
            print('OK, I hope you know what you\'re doing!')
        HWPpvlist=[GLOBAL.HWPAB, GLOBAL.HWPEF, GLOBAL.HWPGH, GLOBAL.HWPIJ];
        HWP.On('all',set_T=1)
        # wait (up to ~10s) for all four waveplates to actually reach ~0deg
        currHWPrbv=np.sum([np.abs(HWP_rbv.get()) for HWP_rbv in HWPpvlist])
        if currHWPrbv > 10:
            hwploopiter=0
            while currHWPrbv > 1:
                time.sleep(1)
                currHWPrbv=np.sum([np.abs(HWP_rbv.get()) for HWP_rbv in HWPpvlist])
                hwploopiter+=1
                if hwploopiter > 10:
                    print('HWP settings do not seem to be settling at 0deg! Please try again later!')
                    return False
        armlist=['AB','EF','GH','IJ']
        GLOBAL.MBCmode.put(1)#set MAN mode on MBC
        if np.sum(YFE.Get(display=False)) < 100:
            print('Check YFE before optimizing!')
        optwvfm=pickle.load(open(GLOBAL.PSFILEPATH+'opttrace.p','rb'));
        try:
            oldwvfm=HAWG().ReadPulseHeights();
            HAWG().WritePulseHeights(optwvfm);
        except:
            print('Failed to read old waveform/load new waveform!')
            return
        print('Closing all shutters...')
        TTL_shutter.Toggle('closeall',display=False);#close all the shutters
        time.sleep(4)
        GLOBAL.EVRLPLSSEC.put(43);GLOBAL.EVRLPLSSEN.put(1);#enable these...
        # verify the scope trace is live before trusting it for the optimization
        try:
            tempchk1=LOSC('a').rch(1);time.sleep(.15);tempchk2=LOSC('a').rch(1);
            if np.sum(np.abs(tempchk1-tempchk2))<1e-6:
                # prompt text fixed: 'y' continues, anything else aborts (old text asked 'Abort?' but 'y' continued)
                print('Warning: scope trace doesn\'t appear to be updating, please check scope! Continue anyway? [enter y/n]')
                checkprompt=efc.getch_with_TO(TOsec=10,display=False);
                if checkprompt not in ('y','Y'):
                    print('Try again later then!');
                    HAWG().WritePulseHeights(oldwvfm);
                    return
                else:
                    print('OK, I hope you know what you\'re doing!')
        except:
            print('Scope error, check scope status! Aborting...')
            HAWG().WritePulseHeights(oldwvfm);
            return
        startposlist=[HWPrbv.get() for HWPrbv in HWPpvlist];
        newposlist=startposlist[:]
        stepQ=1.0;rangeQ=20.0;#scan +/-20deg around the current position in 1deg steps
        for ii in range(4):
            if armlist[ii] in armsQ:#only prep the stage if it's going to be used
                HWPpvlist[ii].put(startposlist[ii]+(-rangeQ+stepQ*0))
        currentshutter=-1;#index of the arm whose shutter is currently open; -1 = none
        #(bug fix: the old sentinel 0 collided with arm AB's index, so a failure with AB open never re-opened it)
        #set up all the plotting stuff
        plt.ion()
        fig,axs=plt.subplots(2,2,gridspec_kw={'hspace':0.4,'wspace':0.3})
        xdat=[[startposlist[ii]+(-rangeQ+stepQ*(jj)) for jj in range(int(1+(2*rangeQ/stepQ)))] for ii in range(4)]
        ydat=[[0]*int(1+(2*rangeQ/stepQ)) for ii in range(4)]
        ax1,=axs[0,0].plot(xdat[0],ydat[0]); axs[0,0].set_xlabel('AB'); plt.pause(0.01);
        ax2,=axs[0,1].plot(xdat[1],ydat[1]); axs[0,1].set_xlabel('EF'); plt.pause(0.01);
        ax3,=axs[1,0].plot(xdat[2],ydat[2]); axs[1,0].set_xlabel('GH'); plt.pause(0.01);
        ax4,=axs[1,1].plot(xdat[3],ydat[3]); axs[1,1].set_xlabel('IJ'); plt.pause(0.01);
        axss=[ax1,ax2,ax3,ax4]
        try:
            SLA=LOSC('A');SLA._Open();#changed to LecroyA since repair
            for ii in range(4):
                if armlist[ii] in armsQ:
                    print('Begin optimizing '+armlist[ii]+'... ',end='',flush=True);
                    hwparmdatax,hwparmdatay=[],[]
                    TTL_shutter.Toggle('open'+armlist[ii],display=False);currentshutter=ii;time.sleep(4);print('Shutter opened!');#open one shutter
                    for jj in range(int(1+(2*rangeQ/stepQ))):
                        print('.',end='',flush=True)
                        HWPpvlist[ii].put(startposlist[ii]+(-rangeQ+stepQ*(jj)));time.sleep(4);#step to new position#was 2.5
                        curr_x=HWPpvlist[ii].get();curr_y=np.max(SLA._rch(3));time.sleep(.15);#in testing, max is more stable than sum
                        if curr_y > 0.005:#threshold so don't skew fit with noise; max is ~~10x this
                            hwparmdatax.append(curr_x);hwparmdatay.append(curr_y);#save x and y
                        print('.',end='',flush=True)
                        ydat[ii][jj]=curr_y
                        axss[ii].set_data(xdat[ii],ydat[ii])
                        axs[ii//2,ii%2].set_ylim((min(ydat[ii]),max(ydat[ii])))
                        plt.pause(0.01)
                        fig.canvas.draw_idle()
                        plt.pause(0.01)
                    print('*')
                    # fit a parabola to the (angle, height) data; vertex = optimal angle
                    qfit=np.polyfit(hwparmdatax,hwparmdatay,2);newpos=qfit[1]/(-2*qfit[0]);#find fit and new max
                    if np.abs(startposlist[ii]-newpos)<0.85*rangeQ:
                        HWPpvlist[ii].put(newpos);newposlist[ii]=newpos;
                        print('HWP position on arm '+armlist[ii]+' changed from '+str(round(startposlist[ii],4))+' to '+str(round(newpos,4)))
                    else:
                        print('Failed! New HWP position on arm '+armlist[ii]+' seems too far off... '+str(round(newpos,4))+' from '+str(round(startposlist[ii],4))+'... Restoring...')
                        HWPpvlist[ii].put(startposlist[ii])
                    TTL_shutter.Toggle('close'+armlist[ii],display=False);currentshutter=-1;#close that shutter;
                    qfitp=np.poly1d(qfit);
                    axs[ii//2,ii%2].plot(xdat[ii],qfitp(xdat[ii]))
                    axs[ii//2,ii%2].relim();plt.pause(0.01);
                    axs[ii//2,ii%2].autoscale(True,True,True)
                    fig.canvas.draw_idle()
                    plt.pause(0.01)
                else:
                    print('Skipping '+armlist[ii]+'...')
            SLA._Close();time.sleep(.15);#changed to LeCroyA
        except:
            print('Failed! Restoring original values and attempting to re-open most-recent shutter... you should verify!')
            try:
                SLA._Close();time.sleep(.15);#changed to LeCroyA
            except:
                pass#bug fix: scope may have failed before it was opened; don't let the close abort the restore below
            if currentshutter >= 0:#bug fix: was '> 0', which skipped re-opening arm AB (index 0)
                TTL_shutter.Toggle('open'+armlist[currentshutter],display=False);
            for ii in range(4):
                HWPpvlist[ii].put(startposlist[ii]);newposlist[ii]=startposlist[ii];
        time.sleep(2);#need time so that last shutter trigger ends before trying to open IJ
        try:
            HAWG().WritePulseHeights(oldwvfm);
        except:
            print('Error! Check waveform!')
        GLOBAL.EVRLPLSSEN.put(0);#disable PC before re-opening shutters
        datestamp=int(datetime.now().strftime('%Y%m%d%H%M%S'))
        HWPlog=pickle.load(open(GLOBAL.PSFILEPATH+'HWP_opt_log.p','rb'))
        HWPlog.append([datestamp,[newposlist[ii] for ii in range(4)]])
        efc.pickledump2(HWPlog,GLOBAL.PSFILEPATH+'HWP_opt_log.p')
        TTL_shutter.Toggle('openall',display=False);#open all the shutters
        MBC.Reset();YFE.SetAll(True);#reset bias...
        plt.ioff()
        motnamelist=[GLOBAL.HWPABoff, GLOBAL.HWPEFoff, GLOBAL.HWPGHoff, GLOBAL.HWPIJoff]#.OFF
        # Shift each motor's offset so its newly-found optimum reads as 0deg.
        # (bug fix: the old loop reused a stale loop index and subtracted newposlist[3]
        # from EVERY offset; pair each offset PV with its own arm's new position instead)
        for temppv,newposQ in zip(motnamelist,newposlist):
            tempval=temppv.get();
            temppv.put(tempval-newposQ);
class Stage:
    """
    Stores functions related to stages and motors
    Current functions include:
        :NewportInitRefAll #useful routine for recovering from Newport outage -- initializes and references all channels
        :NewportBrowserCtrl #internal function meant for launching the Newport browser controller
    Potential future work:
        -SmarAct motor utilities
        -motor setting snapshots
        -motor setting restoration
    """
    def NewportInitRefAll():
        """
        Initialize and reference every channel on all four Newport XPS crates.
        Kicks all 'initialize' PVs first, waits ~10s, then kicks all 'reference' PVs.
        Prints 'Failed!' if any PV write raises.
        """
        ipvlist=[GLOBAL.XPS1IALL, GLOBAL.XPS2IALL, GLOBAL.XPS3IALL, GLOBAL.XPS4IALL]
        rpvlist=[GLOBAL.XPS1RALL, GLOBAL.XPS2RALL, GLOBAL.XPS3RALL, GLOBAL.XPS4RALL]
        try:
            for eapv in ipvlist:
                eapv.put(1)
            efc.dotsleep(10)#give the crates time to finish initializing before referencing
            for eapv in rpvlist:
                eapv.put(1)
        except:
            print('Failed!')
    def NewportBrowserCtrl(inpvnam):
        """
        internal function meant for launching the Newport browser controller
        takes a single Newport motor PV as input, opens a firefox session of the crate with the corresponding XPS driver
        Example: Stage.NewportBrowserCtrl('MEC:LAS:MMN:25.RBV') opens the admin controls for mcn-mec-las4 in a firefox browser
        Returns False if the lookup/launch fails; returns None for a malformed PV name.
        """
        # raw string fixes the invalid '\d' escape (SyntaxWarning on modern Python); pattern unchanged
        pvno=re.findall(r'^MEC:(LAS|PPL):MMN:([\d]+)',inpvnam);
        if len(pvno)>0:
            try:
                pvnoint=int(pvno[0][1])
                # motors are served in banks of 8; build the corresponding MMN_xxyy.CTRL PV
                # to look up the controller's hostname shorthand
                ipaddr=efc.rPV('MEC:{}:MMN_{:02}{:02}.CTRL'.format(pvno[0][0],(8*(pvnoint//8))+1,8*((pvnoint//8)+1)))
                rawipaddr=re.findall(r'IP: (.+)\n', os.popen('netconfig search '+ipaddr).read());
                os.system('firefox --new-window http://{}'.format(rawipaddr[0]))
                print('Reminder: username/password are Administrator/Administrator')
            except:
                print('Process failed!')
                return False
        else:
            print('Bad PV, please try again!')
class Timing:
    """
    Reserved for future timing-related utilities, for example:
        - EVR snapshot and refresh functions
        - fstiming utilities
        - Nstiming utilities
        - Vitara utilities
    """
class CAM:
"""
Class mostly containing utilities for GigE cameras for MEC
Typical usage via CAM.[command]
List of possible commands includes:
:Name #helps with names and naming conventions of all cameras in MEC
:View #quickly view a single camera or multiple cameras in a python window
:QuickSave #saves the data from a specified GigE camera to a PNG image
:QuickSaveData #saves the data from a specified GigE camera to a 2D txt file
:Config #configures plug-ins for a given camera
:ConfigReset #refreshes all current camera configurations
Potential future work:
- save images to file easily
- (also in connection with scan parameters)
- GigE_toggle_trigger for configuring SPL camera triggers
- setdynxhair for quickly setting a custom temporary crosshair position, etc.
- combined utility for tuning a beam while watching a live image of the camera
"""
def Name(GIGEnam='none',returnAll=False):
"""
There are three types of camera names: the PV Name, the SPL CAM Name, and the NickName
This function helps display the table of names and also translates NickNames and SPL CAM Names
into the PV Names, which are needed to run the different camera viewer utilities (etc.)
With no argument given, Name() returns a list of names of all cameras
Example: Name() prints out a table with all supported GigE cameras -- something like this:
Camera NickName SPL CAM Name PV Name Motor
Legend or Regen MEC_SPL_3 MEC:GIGE:24 SM0
StrInA or TopasA MEC_SPL_1 MEC:GIGE:22 SM1
StrInB or TopasB MEC_SPL_7 MEC:GIGE:28 SM2
MPA1In or MPA1A MEC_SPL_2 MEC:GIGE:23 SM3
MPA1Out or MPA1B MEC_SPL_4 MEC:GIGE:25 SM4
MPA2In or MPA2A MEC_SPL_5 MEC:GIGE:26 SM5
MPA2Xtal or MPA2F GigE17_TimeTool_Diag MEC:GIGE:17 SM6
MPA2Out or MPA2B MEC_SPL_6 MEC:GIGE:27 SM7
CompIn or CompA MEC_SPL_9 MEC:GIGE:29 SM8
CompOutFF or CompBFF MEC_SPL_10 MEC:GIGE:30 XPS Mid
CompOutNF or CompBNF MEC_SPL_11 MEC:GIGE:31 XPS In
Trap or Mousetrap MEC_SPL_8 MEC:GIGE:16 None
SPFloat1 or SPFloater1 MEC_SPL_12 MEC:GIGE:32
CompInNF or CompANF MEC_SPL_13 MEC:GIGE:33
TTIn or TTA MEC_SPL_14 MEC:GIGE:34
TTOut or TTB MEC_SPL_15 MEC:GIGE:35
For every PV name (e.g. 'MEC:GIGE:24'), there is a corresponding SPL CAM Name (e.g. 'MEC_SPL_3')
and two Camera NickNames (e.g. 'Legend' and 'Regen'); there is also a motor that is
used to tune the beam to the crosshair shown on the camera's image (e.g. SmarAct SM0)
With a single argument for GIGEnam given, function returns the corresponding PV name
:EXAMPLE: Name('Legend') and Name('Regen') and Name('MEC_SPL_3') all return 'MEC:GIGE:24'
***If hacking the function to use a non-MEC laser camera, use GIGEnam='custom:NAME:OF:GIGE:PV:HEAD'***
Option: using returnAll=True will return not only the PV Name but also the first Camera NickName
and the SPL CAM Name in an array of format [NickName_0, SPL CAM Name, PV Name]
: using returnAll=False is the typical usage that returns just the PV Name
"""
NickNameList=[['Legend','Regen'], ['StrInA','TopasA'], ['StrInB','TopasB'], ['MPA1In','MPA1A'], ['MPA1Out','MPA1B'], ['MPA2In','MPA2A'], ['MPA2Xtal','MPA2F'],
['MPA2Out','MPA2B'], ['CompIn', 'CompA'], ['CompOutFF', 'CompBFF'], ['CompOutNF', 'CompBNF'], ['Trap', 'Mousetrap'],
['SPFloat1','SPFloater1'], ['CompInNF','CompANF'], ['TTIn','TTA'], ['TTOut','TTB']]
SPLNameList=['MEC_SPL_3', 'MEC_SPL_1', 'MEC_SPL_7', 'MEC_SPL_2', 'MEC_SPL_4', 'MEC_SPL_5', 'GigE17_TimeTool_Diag',
'MEC_SPL_6', 'MEC_SPL_9', 'MEC_SPL_10', 'MEC_SPL_11', 'MEC_SPL_8',
'MEC_SPL_12', 'MEC_SPL_13', 'MEC_SPL_14', 'MEC_SPL_15']
PVNameList=['MEC:GIGE:24', 'MEC:GIGE:22', 'MEC:GIGE:28', 'MEC:GIGE:23', 'MEC:GIGE:25', 'MEC:GIGE:26', 'MEC:GIGE:17',
'MEC:GIGE:27', 'MEC:GIGE:29', 'MEC:GIGE:30', 'MEC:GIGE:31', 'MEC:GIGE:16',
'MEC:GIGE:32', 'MEC:GIGE:33', 'MEC:GIGE:34', "MEC:GIGE:35"]
SmarActList=['SM0','SM1','SM2','SM3','SM4','SM5','SM6','SM7','SM8','XPS Mid','XPS In','None',
'(placeholder)', '(placeholder)', '(placeholder)', '(placeholder)']
pos=np.where(np.array([list(map(lambda name: name.casefold(), NickName)) for NickName in np.array(NickNameList)])==GIGEnam.casefold())
if len(pos[0]) > 0:
if returnAll==True:
return [NickNameList[pos[0][0]][0], SPLNameList[pos[0][0]], PVNameList[pos[0][0]]]
else:
return PVNameList[pos[0][0]]
pos2=np.where(np.array(list(map(lambda name: name.casefold(), np.array(SPLNameList))))==GIGEnam.casefold())
if len(pos2[0]) > 0:
if returnAll==True:
return [NickNameList[pos2[0][0]][0], SPLNameList[pos2[0][0]], PVNameList[pos2[0][0]]]
else:
return PVNameList[pos2[0][0]]
pos3=np.where(np.array(list(map(lambda name: name.casefold(), np.array(PVNameList))))==GIGEnam.casefold())
if len(pos3[0]) > 0:
if returnAll==True:
return [NickNameList[pos3[0][0]][0], SPLNameList[pos3[0][0]], PVNameList[pos3[0][0]]]
else:
return PVNameList[pos3[0][0]]
else:
if GIGEnam[:7]=='custom:':
#print("Using custom camera name: "+GIGEnam[7:])
if returnAll==True:
return [GIGEnam[7:],'','']
else:
return GIGEnam[7:]
else:
print('{:<30} {:<21} {:<11} {:<10}'.format('Camera NickName', 'SPL CAM Name', 'PV Name', 'Motor'))
for ii in range(len(NickNameList)):
print('{:<10} or {:<12} {:<21} {:<11} {:<10}'.format(str(NickNameList[ii][0]), str(NickNameList[ii][1]), str(SPLNameList[ii]), str(PVNameList[ii]), str(SmarActList[ii])))
return False
@classmethod
def Jitter(cls, CAMreq, AcqNum=100, RepRateHz=-1, BeamSig=1):
    """
    Utility for taking a quick beam pointing stability measurement on a GigE camera.
    Prints several statistics and generates plot for centroid jitter relative to the
    average position, normalized by the spatial beam dimensions sigma_X and sigma_Y

    Parameters
    ----------
    CAMreq : supply the name of the camera using one of the options in CAM.Name(),
             e.g. 'Regen' or 'CompOutFF'
    AcqNum : number of acquisitions in the scan; the default is 100
    RepRateHz : anticipated repetition rate of the GigE camera in Hz, e.g. RepRateHz=5 for 5Hz operation;
                the default is -1, which instead looks up the current refresh rate using the ArrayRate_RBV PV
    BeamSig : allows custom multiplicative definition of beam sigma relative to the standard
              deviation, with the default of 1; e.g. BeamSig=2.355 would calculate sigma as the FWHM instead

    Returns
    -------
    [centroid_X, centroid_Y, sigma_X, sigma_Y] : per-shot lists of the raw readback values
    """
    PVhead = cls.Name(CAMreq)
    efc.wPV('{}:Acquire'.format(PVhead), 1)  # tries to start the camera first
    if RepRateHz <= 0:
        # look up the camera's current refresh rate instead of using a user-supplied value
        RepRateHz = efc.rPV('{}:ArrayRate_RBV'.format(PVhead))
        if not RepRateHz or RepRateHz <= 0:
            RepRateHz = 1  # fall back to 1 Hz so the sleep below cannot divide by zero
    cX = []; cY = []; sX = []; sY = []
    # guard against AcqNum < 5, which previously caused a modulo-by-zero on the progress printout
    ProgressStep = max(1, AcqNum // 5)
    for ii in range(AcqNum):
        if (ii + 1) % ProgressStep == 0:
            # report progress roughly every 20% of the scan
            print('Shot number: {}, Percent complete: {}'.format(str(ii + 1), str(100 * (ii + 1) / AcqNum)))
        cX.append(efc.rPV('{}:Stats2:CentroidX_RBV'.format(PVhead)))
        cY.append(efc.rPV('{}:Stats2:CentroidY_RBV'.format(PVhead)))
        sX.append(BeamSig * efc.rPV('{}:Stats2:SigmaX_RBV'.format(PVhead)))
        sY.append(BeamSig * efc.rPV('{}:Stats2:SigmaY_RBV'.format(PVhead)))
        time.sleep(1 / RepRateHz)  # pace the polling to roughly one sample per camera frame
    # per-shot centroid excursion from the mean, normalized by the per-shot beam size
    fracX = (np.array(cX) - np.mean(cX)) / np.array(sX)
    fracY = (np.array(cY) - np.mean(cY)) / np.array(sY)
    fracTOT = np.sqrt(fracX**2 + fracY**2)
    print('Number of shots: {}, beam sigma: {} x sigma'.format(AcqNum, BeamSig))
    print('Average centroid_X (cX) and centroid_Y (cY) in pixels: cXavg={}px and cYavg={}px'.format(np.mean(cX), np.mean(cY)))
    print('Average sigma_x (sX) and sigma_y (sY) in pixels: sXavg={}px and sYavg={}px'.format(np.mean(sX), np.mean(sY)))
    # fixed invalid '\%' escape sequences in the original messages
    print('StDev of cX relative to cXavg as % of sX, i.e. fracX = (cX - cXavg)/sXavg): {}%'.format(np.std(fracX)))
    print('StDev of cY relative to cYavg as % of sY, i.e. fracY = (cY - cYavg)/sYavg): {}%'.format(np.std(fracY)))
    # fixed: previously printed the entire fracTOT array although the message promised its StDev
    print('StDev of total fractional relative drift, i.e. fracTOT=np.sqrt(fracX**2 + fracY**2): {}%'.format(np.std(fracTOT)))
    ep.ll([fracX, fracY, fracTOT])
    return [cX, cY, sX, sY]
@classmethod
def _QuickView(cls, CAMreq, ImageNo=2, LIVE=False, MAXLOOPS=25, endfreeze=False, reConfig=False):  # like MEC:GIGE:31
    """
    returns 2D plot of the camera specified in CAMreq
    specify CAMreq using a Camera NickName, SPL Cam Name, or PV Name (i.e. from Name())
    Option: ImageNo should be 2 typically but could be set to 1 potentially
            (recall that GigE IOCs produce multiple image streams)
            (for AD cameras, try ImageNo=0 to trying talking to their ancient decrepit IOCs)
    Option: use LIVE=True to show a live image rather than a still image with LIVE=False
    Option: if LIVE=True then use MAXLOOPS to set the number of loops on the live view before finishing
    Option: reConfig=True means that the CAM.Config function will be re-executed at the conclusion of the loop
          : reConfig=False means that CAM.Config will not execute; tries to be as "read-only" as possible
          : (advisable to set reConfig=False when hacking for use with non-MEC cameras!!)
    Option: endfreeze=True means that the acquisition is stopped after reaching MAXLOOPS
          : endfreeze=False means that the acquisition will continue even after the live view ends
    Example: use _QuickView('Regen') to get just a quick current look at the regen camera output
    Note: to view more than one camera at a time, use View() instead
    Note: using View() with only one argument is equivalent to using _QuickView() so using View() is preferred
    """
    PVhead = cls.Name(CAMreq)
    if reConfig:
        efc.wPV('{}:Acquire'.format(PVhead), 1)  # tries to start the camera first
    try:
        if ImageNo in [1, 2]:
            # GigE IOC path: image dimensions and data live under the IMAGE<n> plugin
            tres1 = efc.rPV(PVhead+':IMAGE'+str(ImageNo)+':ArraySize1_RBV')
            tres2 = efc.rPV(PVhead+':IMAGE'+str(ImageNo)+':ArraySize2_RBV')
            twf = efc.rPV(PVhead+':IMAGE'+str(ImageNo)+':ArrayData')
        elif ImageNo == 0:
            # legacy AD camera IOCs: PV layout differs per IOC family, keyed on the PV number
            # NOTE(review): assumes CAMreq is a numeric-suffixed PV name here — TODO confirm for nickname inputs
            if int(CAMreq.split(":")[-1]) in [100, 90, 285, 295, 287, 297, 320, 423]:
                tres1 = efc.rPV(PVhead+':ArraySizeY_RBV')
                tres2 = efc.rPV(PVhead+':ArraySizeX_RBV')
                twf = efc.rPV(PVhead+':Image:ArrayData')
            elif int(CAMreq.split(":")[-1]) in [186, 461, 469]:
                tres1 = len(efc.rPV(PVhead+':PROJ_V'))
                tres2 = len(efc.rPV(PVhead+':PROJ_H'))
                twf = efc.rPV(PVhead+':IMAGE')
            else:
                print('Could not find the camera!: '+CAMreq)
        else:
            print('Invalid ImageNo!: '+str(ImageNo))
            return False
        if len(twf) != tres1*tres2:
            # pad a short frame with zeroes so the reshape below cannot fail
            twf = list(twf)+(tres1*tres2-len(twf))*[0]
        fig, axs = plt.subplots(1, 1)
        ax1 = axs.imshow(np.array_split(np.array(twf), tres1));
        axs.axes.xaxis.set_ticklabels([]);
        axs.axes.yaxis.set_ticklabels([]);
        axs.tick_params(direction='in');
        axs.set_ylabel(CAMreq);
    except Exception:
        print('Failed!')
        return
    fig.tight_layout()
    fig.show();
    waittime = .01;
    plt.pause(waittime);
    time.sleep(waittime)
    loopcount = 0
    if LIVE:
        while loopcount < MAXLOOPS:
            try:
                if reConfig:
                    cls.Config(CAMreq, LIVE=True)
                    efc.wPV('{}:Acquire'.format(PVhead), 1)
                # fetch a fresh frame; the old unconditional IMAGE-path pre-fetch was removed
                # because it queried the wrong PV when ImageNo=0 before being overwritten
                if ImageNo in [1, 2]:
                    twf = efc.rPV(PVhead+':IMAGE'+str(ImageNo)+':ArrayData')
                else:
                    if int(CAMreq.split(":")[-1]) in [100, 90, 285, 295, 287, 297, 320, 423]:
                        twf = efc.rPV(PVhead+':Image:ArrayData')
                    elif int(CAMreq.split(":")[-1]) in [186, 461, 469]:
                        twf = efc.rPV(PVhead+':IMAGE')
                    else:
                        print('Could not find the camera!: '+CAMreq)
                if PVhead == 'MEC:GIGE:29':
                    # this camera reports a 3D array; sort the dims to find the true 2D frame size
                    tres1 = efc.rPV(PVhead+':IMAGE'+str(ImageNo)+':ArraySize1_RBV')
                    tres2 = efc.rPV(PVhead+':IMAGE'+str(ImageNo)+':ArraySize0_RBV')
                    tres3 = efc.rPV(PVhead+':IMAGE'+str(ImageNo)+':ArraySize2_RBV')
                    tresL = sorted([tres1, tres2, tres3], reverse=True)
                    if len(twf) != tresL[0]*tresL[1]:
                        twf = list(twf) + (tresL[0]*tresL[1] - len(twf))*[0]
                        tres1 = tres3
                ax1.set_data(np.array_split(np.array(twf), tres1));
                fig.canvas.draw_idle()
                plt.pause(waittime)
                time.sleep(waittime)
            except Exception:
                print('Error occured when plotting {}!'.format(CAMreq))
            loopcount += 1
    if reConfig:
        cls.Config(CAMreq, LIVE=False)
    if endfreeze == False:
        efc.wPV('{}:Acquire'.format(PVhead), 1)
    return
@classmethod
def QuickSave(cls, CAMreq, ImageNo=2, FileNameQ='default'):  # like MEC:GIGE:31
    """
    saves 2D plot of the camera specified in CAMreq without trying to change acquisition status
    specify CAMreq using a Camera NickName, SPL Cam Name, or PV Name (i.e. from Name())
    Option: ImageNo should be 2 typically but could be set to 1 potentially
            (recall that GigE IOCs produce multiple image streams)
            (for AD cameras, try ImageNo=0 to trying talking to their ancient decrepit IOCs)
    Option: FileNameQ sets the base of the saved file name; the default derives it from the camera NickName
    """
    PVhead = cls.Name(CAMreq)
    try:
        if ImageNo in [1, 2]:
            # GigE IOC path: image dimensions and data live under the IMAGE<n> plugin
            tres1 = efc.rPV(PVhead+':IMAGE'+str(ImageNo)+':ArraySize1_RBV')
            tres2 = efc.rPV(PVhead+':IMAGE'+str(ImageNo)+':ArraySize2_RBV')
            twf = efc.rPV(PVhead+':IMAGE'+str(ImageNo)+':ArrayData')
        elif ImageNo == 0:
            # legacy AD camera IOCs: PV layout differs per IOC family, keyed on the PV number
            if int(CAMreq.split(":")[-1]) in [100, 90, 285, 295, 287, 297, 320, 423]:
                tres1 = efc.rPV(PVhead+':ArraySizeY_RBV')
                tres2 = efc.rPV(PVhead+':ArraySizeX_RBV')
                twf = efc.rPV(PVhead+':Image:ArrayData')
            elif int(CAMreq.split(":")[-1]) in [186, 461, 469]:
                tres1 = len(efc.rPV(PVhead+':PROJ_V'))
                tres2 = len(efc.rPV(PVhead+':PROJ_H'))
                twf = efc.rPV(PVhead+':IMAGE')
            else:
                print('Could not find the camera!: '+CAMreq)
        else:
            print('Invalid ImageNo!: '+str(ImageNo))
            return False
        if len(twf) != tres1*tres2:
            # pad a short frame with zeroes so the reshape below cannot fail
            twf = list(twf)+(tres1*tres2-len(twf))*[0]
        fig, axs = plt.subplots(1, 1)
        ax1 = axs.imshow(np.array_split(np.array(twf), tres1));
        axs.axes.xaxis.set_ticklabels([]);
        axs.axes.yaxis.set_ticklabels([]);
        axs.tick_params(direction='in');
        axs.set_ylabel(CAMreq);
    except Exception:
        print('Failed!')
        return
    fig.tight_layout()
    if FileNameQ == 'default':
        FileNameQ = 'GigE_'+cls.Name(CAMreq, returnAll=True)[0]
    # build the full name once: the original called datetime.now() twice with two different
    # formats ('%Y%m%d.%H%M%S' saved vs '%Y%m%d_%H%M%S' printed), so the printed name
    # never matched the file actually written
    FullName = GLOBAL.PSFILEPATH+FileNameQ+'_'+datetime.now().strftime('%Y%m%d_%H%M%S')+'.png'
    try:
        fig.savefig(FullName);
        print('File saved as '+FullName)
    except Exception:
        print('Save failed!')
    return
@classmethod
def QuickSaveData(cls, CAMreq, ImageNo=2, FileNameQ='default'):  # like MEC:GIGE:31
    """
    saves 2D array of the camera specified in CAMreq without trying to change acquisition status
    specify CAMreq using a Camera NickName, SPL Cam Name, or PV Name (i.e. from Name())
    Option: ImageNo should be 2 typically but could be set to 1 potentially
            (recall that GigE IOCs produce multiple image streams)
            (for AD cameras, try ImageNo=0 to trying talking to their ancient decrepit IOCs)
    Option: FileNameQ sets the base of the saved file name; the default derives it from the camera NickName
    """
    PVhead = cls.Name(CAMreq)
    try:
        if ImageNo in [1, 2]:
            # GigE IOC path: image dimensions and data live under the IMAGE<n> plugin
            tres1 = efc.rPV(PVhead+':IMAGE'+str(ImageNo)+':ArraySize1_RBV')
            tres2 = efc.rPV(PVhead+':IMAGE'+str(ImageNo)+':ArraySize2_RBV')
            twf = efc.rPV(PVhead+':IMAGE'+str(ImageNo)+':ArrayData')
        elif ImageNo == 0:
            # legacy AD camera IOCs: PV layout differs per IOC family, keyed on the PV number
            if int(CAMreq.split(":")[-1]) in [100, 90, 285, 295, 287, 297, 320, 423]:
                tres1 = efc.rPV(PVhead+':ArraySizeY_RBV')
                tres2 = efc.rPV(PVhead+':ArraySizeX_RBV')
                twf = efc.rPV(PVhead+':Image:ArrayData')
            elif int(CAMreq.split(":")[-1]) in [186, 461, 469]:
                tres1 = len(efc.rPV(PVhead+':PROJ_V'))
                tres2 = len(efc.rPV(PVhead+':PROJ_H'))
                twf = efc.rPV(PVhead+':IMAGE')
            else:
                print('Could not find the camera!: '+CAMreq)
        else:
            print('Invalid ImageNo!: '+str(ImageNo))
            return False
        if len(twf) != tres1*tres2:
            # pad a short frame with zeroes so the reshape below cannot fail
            twf = list(twf)+(tres1*tres2-len(twf))*[0]
        dat2d = np.array_split(np.array(twf), tres1);
    except Exception:
        print('Failed!')
        return
    if FileNameQ == 'default':
        FileNameQ = 'GigE_'+cls.Name(CAMreq, returnAll=True)[0]
    # build the full name once: the original called datetime.now() twice with two different
    # formats ('%Y%m%d.%H%M%S' saved vs '%Y%m%d_%H%M%S' printed), so the printed name
    # never matched the file actually written
    FullName = GLOBAL.PSFILEPATH+FileNameQ+'_'+datetime.now().strftime('%Y%m%d_%H%M%S')+'.txt'
    try:
        np.savetxt(FullName, dat2d);
        print('Array file saved as '+FullName)
    except Exception:
        print('Array save failed!')
    return
@classmethod
def View(cls, *CAMargs, ImageNo=2, LIVE=False, MAXLOOPS=10, endfreeze=False, reConfig=False):
    """
    returns 2D plot of the cameras specified in *CAMargs
    specify *CAMargs using a Camera NickName, SPL Cam Name, or PV Name, listing all desired cameras
        separated only by commas (see Names() list for options)
    Example: View('Legend','StrInA','StrInB') plots a static view of the 'Legend', 'StrInA', and
        'StrInB' cameras in subplots within a single frame
    Example: View('all') plots a static view of all cameras in subplots within a single frame
        ('all' is equivalent to 'Regen', 'Trap', 'StrInA', 'StrInB', 'MPA1In', 'MPA1Out', 'MPA2In',
        'MPA2Out', 'MPA2Xtal', 'CompIn', 'CompOutNF', and 'CompOutFF')
    Note: if only one camera is specified, then View('Trap') is equivalent to _QuickView('Trap')
    Note: due to bandwidth limitations and other various performance issues, refresh rate may suffer
        according to how many cameras are selected!
    Option: ImageNo should be 2 typically but could be set to 1 potentially
        (recall that GigE IOCs produce multiple image streams)
        (for AD cameras, try ImageNo=0 to trying talking to their ancient decrepit IOCs)
    Option: use LIVE=True to show a live image rather than a still image with LIVE=False
    Option: if LIVE=True then use MAXLOOPS to set the number of loops on the live view before finishing
    Option: reConfig=True means that the CAM.Config function will be re-executed at the conclusion of the loop
          : reConfig=False means that CAM.Config will not execute; tries to be as "read-only" as possible
          : (advisable to set reConfig=False when hacking for use with non-MEC cameras!!)
    Option: endfreeze=True means that the acquisition is stopped after reaching MAXLOOPS
          : endfreeze=False means that the acquisition will continue even after the live view ends
    """
    if isinstance(CAMargs[0], tuple) or isinstance(CAMargs[0], list):
        CAMargs = tuple(CAMargs[0])  # try to catch case of someone accidentally entering input as tuple or list
    if CAMargs == ('all',):
        CAMargs = ('Regen', 'Trap', 'StrInA', 'StrInB', 'MPA1In', 'MPA1Out', 'MPA2In', 'MPA2Out', 'MPA2Xtal', 'CompIn', 'CompOutNF', 'CompOutFF')
    if len(CAMargs) == 1:
        # single camera: defer to the single-view helper
        cls._QuickView(*CAMargs, ImageNo=ImageNo, LIVE=LIVE, MAXLOOPS=MAXLOOPS, reConfig=reConfig)
        return
    plt.ion()
    # two columns; enough rows to hold all requested cameras
    subply = len(CAMargs)//2 + len(CAMargs)%2; subplx = 2;
    fig, axs = plt.subplots(subply, subplx, figsize=(5, 2*subply));
    axss = []; tres1L = []; tPVheadL = []  # per-camera image artists, row counts, and PV heads
    for ii in range(len(CAMargs)):
        # with only two cameras the axs array is 1D, so index differently
        tidx = (ii//2, ii%2) if len(CAMargs) > 2 else (ii%2)
        axs[tidx].axes.xaxis.set_ticklabels([]);
        axs[tidx].axes.yaxis.set_ticklabels([]);
        axs[tidx].tick_params(direction='in');
        axs[tidx].set_ylabel(CAMargs[ii]);
        try:
            tPVhead = cls.Name(CAMargs[ii])
            tPVheadL.append(tPVhead);
            if reConfig:
                efc.wPV('{}:Acquire'.format(tPVhead), 1)  # tries to start each camera
            if ImageNo in [1, 2]:
                tres1 = efc.rPV(tPVhead+':IMAGE'+str(ImageNo)+':ArraySize1_RBV')
                tres2 = efc.rPV(tPVhead+':IMAGE'+str(ImageNo)+':ArraySize0_RBV')
                tres3 = efc.rPV(tPVhead+':IMAGE'+str(ImageNo)+':ArraySize2_RBV')
                tresL = sorted([tres1, tres2, tres3], reverse=True)
                twf = efc.rPV(tPVhead+':IMAGE'+str(ImageNo)+':ArrayData')
            elif ImageNo == 0:
                # legacy AD camera IOCs: PV layout differs per IOC family, keyed on the PV number
                if int(tPVhead.split(":")[-1]) in [100, 90, 285, 295, 287, 297, 320, 423]:
                    tres1 = efc.rPV(tPVhead+':ArraySizeY_RBV')
                    tres2 = efc.rPV(tPVhead+':ArraySizeX_RBV')
                    twf = efc.rPV(tPVhead+':Image:ArrayData')
                elif int(tPVhead.split(":")[-1]) in [186, 461, 469]:
                    tres1 = len(efc.rPV(tPVhead+':PROJ_V'))
                    tres2 = len(efc.rPV(tPVhead+':PROJ_H'))
                    twf = efc.rPV(tPVhead+':IMAGE')
                else:
                    print('Could not find the camera!: '+tPVhead)
                # no third dimension for AD cameras; defining tres3 here keeps the
                # 'tres1=tres3' below a no-op instead of a NameError (old bug, silently
                # swallowed by the broad except)
                tres3 = tres1
                tresL = [tres1, tres2];
            else:
                print('Invalid ImageNo!: '+str(ImageNo))
                return False
            if len(twf) != tresL[0]*tresL[1]:
                # pad a short frame with zeroes so the reshape below cannot fail
                twf = list(twf) + (tresL[0]*tresL[1] - len(twf))*[0]
                tres1 = tres3
            tempax = axs[tidx].imshow(np.array_split(np.array(twf), tres1));
            tres1L.append(tres1);
            axss.append(tempax)
        except Exception:
            print('Error occured when plotting {}!'.format(CAMargs[ii]))
    if (len(CAMargs) > 2) and (len(CAMargs)%2 > 0):
        # odd number of cameras: blank out the unused final subplot
        iit = len(CAMargs)
        tidx = (iit//2, iit%2)
        axs[tidx].axis('off');
    fig.tight_layout()
    plt.show();
    waittime = .01;
    plt.pause(waittime);
    time.sleep(waittime)
    loopcount = 0
    if LIVE:
        while loopcount < MAXLOOPS:
            for ii in range(len(CAMargs)):
                tidx = (ii//2, ii%2) if len(CAMargs) > 2 else (ii%2)
                try:
                    # BUG FIX: the original reused the loop-leftover tPVhead (last camera
                    # from the setup loop) here, so every subplot refreshed with the last
                    # camera's data; use this subplot's own PV head instead
                    tPVhead = tPVheadL[ii]
                    if ImageNo in [1, 2]:
                        twf = efc.rPV(tPVhead+':IMAGE'+str(ImageNo)+':ArrayData')
                    else:
                        if int(tPVhead.split(":")[-1]) in [100, 90, 285, 295, 287, 297, 320, 423]:
                            twf = efc.rPV(tPVhead+':Image:ArrayData')
                        elif int(tPVhead.split(":")[-1]) in [186, 461, 469]:
                            twf = efc.rPV(tPVhead+':IMAGE')
                        else:
                            print('Could not find the camera!: '+tPVhead)
                    if tPVheadL[ii] == 'MEC:GIGE:29':
                        # this camera reports a 3D array; sort the dims to find the true 2D frame size
                        tres1 = efc.rPV(tPVheadL[ii]+':IMAGE'+str(ImageNo)+':ArraySize1_RBV')
                        tres2 = efc.rPV(tPVheadL[ii]+':IMAGE'+str(ImageNo)+':ArraySize0_RBV')
                        tres3 = efc.rPV(tPVheadL[ii]+':IMAGE'+str(ImageNo)+':ArraySize2_RBV')
                        tresL = sorted([tres1, tres2, tres3], reverse=True)
                        if len(twf) != tresL[0]*tresL[1]:
                            twf = list(twf) + (tresL[0]*tresL[1] - len(twf))*[0]
                            tres1 = tres3
                        tres1L[ii] = tres1
                    axss[ii].set_data(np.array_split(np.array(twf), tres1L[ii]));
                    fig.canvas.draw_idle()
                    plt.pause(waittime)
                    time.sleep(waittime)
                except Exception:
                    print('Error occured when plotting {}!'.format(CAMargs[ii]))
            loopcount += 1
    if reConfig:
        for tPVhead in CAMargs:
            cls.Config(cls.Name(tPVhead), LIVE=False)
            if endfreeze == False:
                efc.wPV('{}:Acquire'.format(cls.Name(tPVhead)), 1)
# =============================================================================
# def _tiptilt(cls,camPVhead):#allow toggle btw xyz, getch btw step/etc, plot view of beam on CRIX:GIGE:06
# plt.ion()
# fig,axs=plt.subplots(1,1)
# if camPVhead == 'sg':
# camPV='head'; motorPVx='head'; motorPVy='head';
# else:
# print('No head found!');#return
# camPV=EpicsSignal(camPVhead+':IMAGE2:ArrayData');
# camPVres=cls.rPV(camPVhead+':IMAGE2:ArraySize1_RBV');
# pdat=np.array(camPV.get())
# ax1=axs.imshow(np.array_split(pdat,camPVres),origin='upper',cmap='prism');plt.pause(0.01);#gist_ncar
# plt.show();
# waittime=.01;
# plt.pause(waittime);
# time.sleep(waittime)
# #axs.imshow(Zref,alpha=0.1,origin='lower')
# while True:
# pdatn=np.array(camPV.get())
# if True:#not np.lib.arraysetops.setdiff1d(pdatn[0], pdat[0]):
# ax1.set_data(np.array_split(pdat,camPVres))
# fig.canvas.draw_idle()
# plt.pause(waittime)
# pdat=pdatn
# time.sleep(waittime);
# else:
# print('Same frame!')
# =============================================================================
@classmethod
def DisableOverlay(cls, CAMreq, ImageNo=2):
    """
    Turns off all eight image overlays (Box1-4 and Cross1-4) on the IMAGE<ImageNo> stream
    of the camera specified by CAMreq (a NickName, SPL Cam Name, or PV Name from Name()).
    Stops acquisition first and does not restart it; the caller is responsible for
    re-enabling acquisition afterwards if desired.
    """
    PVhead = cls.Name(CAMreq)
    efc.wPV('{}:Acquire'.format(PVhead), 0)  # hopefully prevents IOC crashes, per TylerJ
    for ii in range(8):  # eight overlays possible
        try:
            # map loop index to overlay record name: 0-3 -> Box1-4, 4-7 -> Cross1-4
            OverlayNameField = '{}{}'.format('Box' if ii < 4 else 'Cross', (ii % 4) + 1)
            OverlayName = '{}:IMAGE{}:{}'.format(PVhead, ImageNo, OverlayNameField)
            # BUG FIX: the original wrote 1 (= on, per the 0=off/1=on convention used in
            # Config), which ENABLED the overlays in a function named DisableOverlay
            efc.wPV(OverlayName+':Use', 0)  # 0=off,1=on
        except Exception:
            print('Failed to disable overlay!')
    return
@classmethod
def Config(cls, CAMreq, RefXhairXY=[-1,-1], InstructionStr='', MisalignmentTolerance=-1, ImageNo=2, RefXhairXYsize=[-1,-1],LIVE=False):
    """
    configures stats, overlay plug-ins, etc. of a given camera specified by CAMreq
    specify CAMreq using a Camera NickName, SPL Cam Name, or PV Name (i.e. from Name())
    Option: RefXhairXY sets the desired static reference crosshair position,
            for example indicating the "targeted" location for a laser beam
          : default RefXhairXY=[-1,-1] indicates not to change anything, as it was set up previously
    Option: InstructionStr sets the desired instruction string to be displayed in the corner of the camera image,
            for example giving a helpful tip/reminder to the viewer (e.g. which motor to use for tuning, etc.)
          : default InstructionStr='' indicates not to change anything, as it was set up previously
    Option: MisalignmentTolerance specifies a threshold for the absolute distance (in pixels) between RefXhaiXY
            and the current beam centroid; if difference > or < MisalignmentTolerance then different messages
            will be printed to the output screen
          : default MisalignmentTolerance=-1 indicates not to change anything, as it was set up previously
    Option: RefXhairXYsize sets the desired size of the static reference crosshair position,
            for example indicating the approximate size of the laser beam when centered on the "targeted" location
          : default RefXhairXYsize=[-1,-1] indicates not to change anything, as it was set up previously
    Option: ImageNo should be 2 typically but could be set to 1 potentially
            (recall that GigE IOCs produce multiple image streams)
    Option: LIVE will cause certain overlays to appear or to be hidden, depending on if it is True or False
          : This is because certain chosen overlays require Python calculations that only occur while the loop is running
          : In order to avoid confusion, these Python-calculated overlays are hidden when LIVE=False and visible when LIVE=True
          : Examples of such overlays are the Stats, Counts, and AlignmentOK overlays
    The GigE cam IOC typically supports 8 overlays on a single image; this function configures the 8 overlays as follows:
        1: Ref X-hair  #static reference crosshair, typically gives a target for the live beam
        2: DynCentroid  #dynamic centroid whose position and size follow the position and size of the laser
        3: CAMname  #text overlay of the name of the camera, meant to help avoid confusing cameras for each other
        4: Instructions  #meant to provide helpful tips to viewers, e.g. on what action to take if the beam is misaligned
        5: TimeStamp  #adds the current timestamp, useful for checking how recent the visible image is, etc.
        6: Stats  #uses Python to calculate and display centroid X and Y and sigma X and Y on-screen
        7: Counts  #uses Python to calculate and display camera's Max and Total counts on-screen
        8: AlignmentOK  #displays a conditional message depending on if the misalignment exceeds the MisalignmentTolerance or not
    Nominal values for the MEC SPL camera configurations are kept in ConfigReset(); if things get screwed up (because sometimes
    the IOC just has a bad day and screws everything up), you can run ConfigReset() and *hopefully* get back to normal
    (not advised to use this on custom cameras unless you are REALLY brave and daring)
    """
    PVhead=cls.Name(CAMreq)
    if LIVE==False:
        efc.wPV('{}:Acquire'.format(PVhead),0)#hopefully prevents IOC crashes, per TylerJ
    # [width, height] of the image stream, read back from the IOC
    ArraySize=[efc.rPV('{}:IMAGE{}:ArraySize0_RBV'.format(PVhead,ImageNo)),efc.rPV('{}:IMAGE{}:ArraySize1_RBV'.format(PVhead,ImageNo))]
    # descriptive name for each of the 8 overlays; the AlignmentOK name also encodes the
    # 3-digit tolerance so it can be read back later (see the ii==7 special case below)
    NameList=['Ref X-hair','DynCentroid','CAMname','Instructions','TimeStamp','Stats','Counts',
              'AlignmentOK'+str(str(MisalignmentTolerance).zfill(3) if MisalignmentTolerance > 0 else efc.rPV('{}:IMAGE{}:Cross4:Name'.format(PVhead,ImageNo))[-3:])]
    ShapeList=[0,2,3,3,3,3,3,3]#0=Cross,1=Rect,2=Text,3=Ellipse;;;DOCUMENTATION WRONG!! 2=Ellipse, 3=Text!!!!
    #each overlay needs the following information:
    #[SizeXLink.DOL, SizeX, PositionXLink.DOL, PositionX, CenterXLink.DOL, CenterX,
    # SizeYLink.DOL, SizeY, PositionYLink.DOL, PositionY, CenterYLink.DOL, CenterY,
    # TimeStampFormat]
    #Size affects Position/Center, so set Size first then Position/Center
    #specify for each overlay in ArgsList
    #in specifying SizeX or SizeY: 9999 indicates it should be set according to string length
    # 'XXXX' is a placeholder replaced per-overlay (Box1..Cross4) inside the loop below
    OverlayName='{}:IMAGE{}:{}'.format(PVhead,ImageNo,'XXXX')
    ArgsList=[
        #Ref X-hair
        [OverlayName+':SizeXDummy', RefXhairXYsize[0], '', 0, OverlayName+':CenterXDummy', RefXhairXY[0],
         OverlayName+':SizeYDummy', RefXhairXYsize[1], '', 0, OverlayName+':CenterYDummy', RefXhairXY[1]],
        #DynCentroid
        [PVhead+':Stats2:SigmaX_RBV CP', 0, '', 0, PVhead+':Stats2:CentroidX_RBV CP', 0,
         PVhead+':Stats2:SigmaY_RBV CP', 0, '', 0, PVhead+':Stats2:CentroidY_RBV CP', 0],
        #CAMname
        [OverlayName+':SizeXDummy', 9999, OverlayName+':PositionXDummy', 10, '', 0,
         OverlayName+':SizeYDummy', 9999, OverlayName+':PositionYDummy', 10, '', 0,
         '{:<10.9} {:<11.10} {:<11.11}'.format(*cls.Name(CAMreq,returnAll=True))],
        #Instructions
        [OverlayName+':SizeXDummy', 9999, OverlayName+':PositionXDummy', 10, '', 0,
         OverlayName+':SizeYDummy', 9999, OverlayName+':PositionYDummy', -20+ArraySize[1], '', 0,
         InstructionStr if len(InstructionStr)>1 else efc.rPV('{}:IMAGE{}:Box4:DisplayText'.format(PVhead,ImageNo))],
        #TimeStamp
        [OverlayName+':SizeXDummy', 9999, OverlayName+':XPosDummy', -205+ArraySize[0], '', 0,
         OverlayName+':SizeYDummy', 9999, OverlayName+':YPosDummy', -20+ArraySize[1], '', 0,
         '%Y-%m-%d %H:%M:%S.%03f'],
        #Stats
        [OverlayName+':SizeXDummy', 9999, OverlayName+':XPosDummy', -300+ArraySize[0], '', 0,
         OverlayName+':SizeYDummy', 9999, OverlayName+':YPosDummy', 10, '', 0,
         'Stats: X:{:<3.3} Y:{:<3.3} sX:{:<3.3} sY:{:<3.3}'.format(str(efc.rPV(PVhead+':Stats2:CentroidX_RBV')),
                                                                   str(efc.rPV(PVhead+':Stats2:CentroidY_RBV')),
                                                                   str(efc.rPV(PVhead+':Stats2:SigmaX_RBV')),
                                                                   str(efc.rPV(PVhead+':Stats2:SigmaY_RBV')))],
        #Counts
        [OverlayName+':SizeXDummy', 9999, OverlayName+':XPosDummy', -282+ArraySize[0], '', 0,
         OverlayName+':SizeYDummy', 9999, OverlayName+':YPosDummy', 25, '', 0,
         'Max:{:<5} Counts:{:<12}'.format(efc.rPV(PVhead+':Stats2:MaxValue_RBV'), int(efc.rPV(PVhead+':Stats2:Net_RBV')))],
        #Alignment OK
        [OverlayName+':SizeXDummy', 9999, '', 0, OverlayName+':XPosDummy', efc.rPV('{}:IMAGE{}:Box1:CenterX_RBV'.format(PVhead,ImageNo)),
         OverlayName+':SizeYDummy', 9999, '', 0, OverlayName+':YPosDummy', efc.rPV('{}:IMAGE{}:Box1:CenterY_RBV'.format(PVhead,ImageNo))+efc.rPV('{}:IMAGE{}:Box1:SizeY_RBV'.format(PVhead,ImageNo))+40,
         'AlignChk']
        ]
    for ii in range(8):#eight overlays possible
        if (LIVE==True and ii<5):
            # the first five overlays are static; skip rewriting them during live refresh
            pass
        else:
            try:
                #turn on stats...
                efc.wPV(PVhead+':Stats{}:EnableCallbacks'.format(ImageNo), 1)
                efc.wPV(PVhead+':Stats{}:ComputeCentroid'.format(ImageNo), 1)
                efc.wPV(PVhead+':Stats{}:ComputeStatistics'.format(ImageNo), 1)
                #set up everything...
                # map overlay index to record name: 0-3 -> Box1-4, 4-7 -> Cross1-4
                OverlayNameField='{}{}'.format('Box' if ii<4 else 'Cross',(ii%4)+1)
                OverlayName='{}:IMAGE{}:{}'.format(PVhead,ImageNo,OverlayNameField)
                efc.wPV('{}:IMAGE{}:NDArrayPort'.format(PVhead,ImageNo),'IMAGE{}:Over'.format(ImageNo))
                efc.wPV('{}:IMAGE{}:Over:EnableCallbacks'.format(PVhead,ImageNo),1)
                efc.wPV(OverlayName+':Name', NameList[ii])#simple description field
                efc.wPV(OverlayName+':Use', 1)#0=off,1=on
                efc.wPV(OverlayName+':Shape', ShapeList[ii])#0=Cross,1=Rect,2=Text,3=Ellipse;;;DOCUMENTATION WRONG!! 2=Ellipse, 3=Text!!!!
                efc.wPV(OverlayName+':DrawMode', 1)#0=set,1=XOR
                efc.wPV(OverlayName+':Green', 2000 if (ii < 5 or LIVE == True) else 0)#set mono color; don't let last ones appear yet
                # line width per shape; currently the same (3) for every shape
                if ShapeList[ii] == 0:
                    Width=3#1
                elif ShapeList[ii] == 1:
                    Width=3
                elif ShapeList[ii] == 2:
                    Width=3#1
                elif ShapeList[ii] == 3:
                    Width=3
                else:
                    pass
                efc.wPV(OverlayName+':WidthX', Width)#width param for shape lines
                efc.wPV(OverlayName+':WidthY', Width)#width param for shape lines
                #all params needed for size and positioning of overlays
                #try to filter out writing zeroes, as this seems to recalculate stuff inadvertantly??
                #also: display text repeats if size is larger than needed for that string
                #  -->use -1 to indicate that the size needs to be calculated from the instruction length
                #     in order to show only the intended message
                efc.wPV(OverlayName+':SizeXLink.DOL', ArgsList[ii][0].replace('XXXX',OverlayNameField))
                if ArgsList[ii][1] > 0:
                    if ii==7:#special case for Alignment OK
                        # compose the conditional message by comparing the live centroid's distance from the
                        # Box1 reference crosshair against the 3-digit tolerance encoded in the Cross4 name
                        ArgsList[ii][12]='Alignment {}'.format('NOT OK' if np.sqrt((efc.rPV(PVhead+':Stats2:CentroidX_RBV')-efc.rPV('{}:IMAGE{}:Box1:CenterX_RBV'.format(PVhead,ImageNo)))**2+(efc.rPV(PVhead+':Stats2:CentroidY_RBV')-efc.rPV('{}:IMAGE{}:Box1:CenterY_RBV'.format(PVhead,ImageNo)))**2) > int(efc.rPV('{}:IMAGE{}:Cross4:Name'.format(PVhead,ImageNo))[-3:]) else 'OK')
                    # 9999 sentinel: size the text box from the string length (~9px per character)
                    efc.wPV(OverlayName+':SizeX', ArgsList[ii][1] if ArgsList[ii][1] < 9999 else 9*len(ArgsList[ii][12]))
                efc.wPV(OverlayName+':PositionXLink.DOL', ArgsList[ii][2].replace('XXXX',OverlayNameField))
                if ArgsList[ii][3] > 0:
                    efc.wPV(OverlayName+':PositionX', ArgsList[ii][3])
                efc.wPV(OverlayName+':CenterXLink.DOL', ArgsList[ii][4].replace('XXXX',OverlayNameField))
                if ArgsList[ii][5] > 0:
                    efc.wPV(OverlayName+':CenterX', ArgsList[ii][5])
                efc.wPV(OverlayName+':SizeYLink.DOL', ArgsList[ii][6].replace('XXXX',OverlayNameField))
                if ArgsList[ii][7] > 0:
                    # 9999 sentinel: text boxes get a fixed 20px height
                    efc.wPV(OverlayName+':SizeY', ArgsList[ii][7] if 9999 > ArgsList[ii][7] >= 0 else 20)
                efc.wPV(OverlayName+':PositionYLink.DOL', ArgsList[ii][8].replace('XXXX',OverlayNameField))
                if ArgsList[ii][9] > 0:
                    efc.wPV(OverlayName+':PositionY', ArgsList[ii][9])
                efc.wPV(OverlayName+':CenterYLink.DOL', ArgsList[ii][10].replace('XXXX',OverlayNameField))
                if ArgsList[ii][11] > 0:
                    efc.wPV(OverlayName+':CenterY', ArgsList[ii][11])
                if ShapeList[ii] == 3:#only needed for text boxes; NOT 2!!!
                    #use :DisplayText??
                    # a leading '%' marks a strftime format string (TimeStamp overlay) rather than literal text
                    if ArgsList[ii][12][0] != '%':
                        if (len(ArgsList[ii][12]) > 1):
                            efc.wPV(OverlayName+':DisplayText', ArgsList[ii][12])
                    else:
                        efc.wPV(OverlayName+':TimeStampFormat', ArgsList[ii][12])
                    efc.wPV(OverlayName+':Font',3)
            except:
                print('Error setting up {}!'.format(NameList[ii]))
# =============================================================================
# def GigE_toggle_trigger(ENBINHReq, GigEreq='all', RepRateReq=5):
# """
# INHall, ENBall
# 5, 0
# """
# #check to see if UNI_EVR output is enabled or disabled before changing trigger timing of UNI_EVR
# #may also check for
# # individually set free run or fixed rate or ext trig in? use CAMconfig for this?
# if UNI_TRIG_IN_EVR in [44,45,46]:
# EFdelay=0e-3
# elif UNI_TRIG_IN_EVR in [177]:
# EFdelay=100e-3
# else:
# print('Unanticipated UNI_TRIG_IN_EVR case!')
#
# if command == 'INH':
# DG8_EF_Polarity = 'POS'
# elif command == 'ENA':
# DG_8_EF_Polarity = 'NEG'
# else:
# print('Unanticipated command case!')
# return
# =============================================================================
@classmethod
def ConfigReset(cls, CAMreq='all'):  # not sure why these change sometimes -- config file somewhere? camera problem?
    """
    if default CAMreq='all', executes the Config function serially on SPL1-10 with their own default values
    can also accept a single specified camera input to reset
    """
    # nominal overlay settings per camera, applied via Config() with ImageNo=2 and LIVE=False
    NominalSettings = {
        'Legend':    dict(RefXhairXY=[359,251],       MisalignmentTolerance=40, RefXhairXYsize=[100,100], InstructionStr='DO NOT tune to xhair!'),
        'StrInA':    dict(RefXhairXY=[335,257],       MisalignmentTolerance=25, RefXhairXYsize=[40,40],   InstructionStr='Use SM0 (NOT SM1!) to put centroid on xhair!'),
        'StrInB':    dict(RefXhairXY=[334,243],       MisalignmentTolerance=40, RefXhairXYsize=[40,40],   InstructionStr='Use SM2 to put centroid on xhair!'),
        'MPA1In':    dict(RefXhairXY=[152,147],       MisalignmentTolerance=15, RefXhairXYsize=[25,25],   InstructionStr='Use SM3 to put centroid on xhair!'),
        'MPA1Out':   dict(RefXhairXY=[411,249],       MisalignmentTolerance=15, RefXhairXYsize=[40,40],   InstructionStr='Use SM4 to maximize spot!'),
        'MPA2In':    dict(RefXhairXY=[366,276],       MisalignmentTolerance=15, RefXhairXYsize=[50,50],   InstructionStr='Use SM5(V) to put centroid on xhair!'),
        'MPA2Out':   dict(RefXhairXY=[385,259],       MisalignmentTolerance=40, RefXhairXYsize=[190,190], InstructionStr='Use SM7 (and SM6) to put centroid on xhair!'),
        'CompIn':    dict(RefXhairXY=[276/2,252/2],   MisalignmentTolerance=50, RefXhairXYsize=[40,40],   InstructionStr='Use SM8 to put centroid on xhair!'),
        'CompOutFF': dict(RefXhairXY=[292,300],       MisalignmentTolerance=25, RefXhairXYsize=[40,40],   InstructionStr='Use XPS2 Mid (and Input) Mirror to align to xhair!'),
        'CompOutNF': dict(RefXhairXY=[364,277],       MisalignmentTolerance=40, RefXhairXYsize=[40,40],   InstructionStr='Use XPS2 Input(/Mid) Mirror to align to xhair!'),
        'SPFloat1':  dict(RefXhairXY=[360,230],       MisalignmentTolerance=40, RefXhairXYsize=[40,40],   InstructionStr='test'),
        'Trap':      dict(RefXhairXY=[215,156],       MisalignmentTolerance=40, RefXhairXYsize=[40,40],   InstructionStr='Watch for back reflections!!'),
        'CompInNF':  dict(RefXhairXY=[304,210],       MisalignmentTolerance=50, RefXhairXYsize=[180,180], InstructionStr='Use SM7 to center beam on xhair!'),
    }
    if CAMreq == 'all':
        ResetList = ['Legend','StrInA','StrInB','MPA1In','MPA1Out','MPA2In','MPA2Xtal','MPA2Out','CompIn','CompOutFF','CompOutNF','Trap','SPFloat1','CompInNF','TTIn','TTOut']
    else:
        # resolve whatever identifier was supplied to its canonical NickName
        ResetList = [cls.Name(CAMreq, returnAll=True)[0]]
    for eaEntry in ResetList:
        settings = NominalSettings.get(eaEntry)
        if settings is not None:
            cls.Config(CAMreq=eaEntry, ImageNo=2, LIVE=False, **settings)
        else:
            # cameras in the 'all' list without stored nominal values (e.g. MPA2Xtal, TTIn, TTOut)
            print('Camera configuration not found: '+eaEntry)
    return
class TTL_shutter:
    """
    Class for organizing utilities for the TTL-triggerable beam shutters in the LPL and SPL
    Current shutter locations (and names) are as follows:
        :LPL: 4 x immediately before the 2" heads on 'AB', 'EF', 'GH', 'IJ'
            : 2 x immediately before the periscopes going into the chamber, 'WEST 527' ('WW') and 'EAST 527' ('XX')
        :SPL: 1 x before the compressor grating of the Legend ('REGEN' or 'ZZ')
    Shutters do not possess internal state sensor, so state of each shutter (open vs closed) tracked using notepad PVs
    (notepad PVs MEC:LAS:FLOAT:14..20, one per shutter, 0=open / 1=closed)
    Typical usage via TTL_shutter.[command]
    Possible commands include:
        :Status #provides the current status (open vs closed) of each shutter
        :Toggle #allows control of shutters to open/close/toggle state
        :Refresh #refresh state tracker if shutters are touched manually or Beckhoff has problems, etc.
    """
    def Status(display=True):
        """
        Provides the current status (open vs closed) of all shutters, returning an array of 0s and 1s
        Array values correspond to shutters in the following order:
            : ['AB','EF','GH','IJ','WEST 527','EAST 527','REGEN']
        Example: if all shutters are closed except for 'AB', 'EF', and 'WEST 527', the output will be
            [0,0,1,1,0,1,1]
        Option: if display=True then a status report is printed out to terminal in addition to returning the status array
              : if display=False then only the status array is returned; nothing is printed to terminal
        """
        # NOTE: implicitly static -- always called via the class (TTL_shutter.Status(...)), never on an instance
        pvstatuslist=['MEC:LAS:FLOAT:'+str(ii) for ii in range(14,21)];
        statuslist=[]
        for eapv in pvstatuslist:
            temppv=EpicsSignal(eapv);
            statuslist.append(int(temppv.get()))#0=open,1=closed
        if display:
            shutlist=['AB','EF','GH','IJ','WEST 527','EAST 527','REGEN']
            # closed shutters (value 1) are rendered with the 'BRR' color code via efc.cstr
            print('(0=out,1=in) '+', '.join([ea_shut+':'+efc.cstr(ea_stat,'BRR' if ea_stat else '') for ea_shut,ea_stat in zip(shutlist,statuslist)]))
        return statuslist
    @classmethod
    def Toggle(cls,ArmStrQ,display=True):
        """
        Toggles the current state of all shutters mentioned in ArmStrQ and then returns the shutter status at the end
        Using 'open' or 'close' at the beginning of ArmStrQ simply makes sure that all specified arms end up in the specified state
        rather than just toggling the state, i.e. it will toggle specified arms only if the specified state is not already satisfied
        Shutter options are 'AB', 'EF', 'GH', 'IJ', 'WW' (west 527), 'XX' (east 527), and 'ZZ' (SPL regen)
        Using 'all' is equivalent to using 'ABEFGHIJWWXX' (note that the regen shutter 'ZZ' is left out)
        Example: Toggle('ABEF') will toggle the 'AB' and 'EF' shutters from whatever their current state is,
            i.e. if 'AB' was open and 'EF' was closed then Toggle('ABEF') would close 'AB' and open 'EF'
        Example: Toggle('openABIJ') will make sure 'AB' and 'IJ' shutters end up in the open position,
            i.e. if 'AB' was closed and 'IJ' was open then 'AB' would be toggled to open and 'IJ' would remain open
        Example: Toggle('closeall') will make sure all LPL shutters end up in the closed position,
            i.e. if 'AB', 'EF', 'GH', and 'IJ' were initially open and 'WW' and 'XX' were initially closed then
            'AB', 'EF', 'GH', and 'IJ' would be toggled to closed and 'WW' and 'XX' would remain in the closed position
        Example: Toggle('all') toggles the state of all LPL shutters so that the configuration is exactly the opposite of the initial state,
            i.e. if 'AB', 'EF', 'GH', and 'IJ' were initially open and 'WW' and 'XX' were initially closed then
            'AB', 'EF', 'GH', and 'IJ' would be toggled to closed and 'WW' and 'XX' would be toggled to opened
        Option: display=True reads back and prints the initial state, makes the toggle, and then reads back and prints the final state
        Option: display=False still returns the shutter status at the end but does not print anything to terminal
        """
        #could add GLOBALS.TTLAB etc later
        AllStrq='abefghijwwxxzz';#ww is WEST 527, xx is EAST 527, zz is REGEN SHUT
        # expand the 'all' shorthand (regen shutter 'zz' deliberately excluded)
        ArmStrQ=ArmStrQ.lower().replace('all','abefghijwwxx')
        # tracked state of the six LPL shutters; trailing REGEN entry dropped
        currTTLstate=cls.Status(display=False)[:-1];
        if ArmStrQ.lower()[:4] == 'clos':
            # strip the 5-char 'close' prefix; sorting canonicalizes the arm ordering
            ArmStrQ=''.join(sorted(ArmStrQ[5:].lower()))
            # keep only arms currently open (0) so that toggling leaves them closed
            ArmStrQ=''.join([shutt for shutt,oshutt in zip(re.findall('..','abefghijwwxx'),currTTLstate) if oshutt==0 and shutt in ArmStrQ])
        elif ArmStrQ.lower()[:4] == 'open':
            # strip the 4-char 'open' prefix
            ArmStrQ=''.join(sorted(ArmStrQ[4:].lower()))
            # keep only arms currently closed (1) so that toggling leaves them open
            ArmStrQ=''.join([shutt for shutt,oshutt in zip(re.findall('..','abefghijwwxx'),currTTLstate) if oshutt==1 and shutt in ArmStrQ])
        else:
            ArmStrQ=''.join(sorted(ArmStrQ.lower()))
        if display:
            print('Initially: ',end='',flush=True)
        statuslist=cls.Status(display=display)
        # fire the TTL trigger PV of every requested shutter (two letters per shutter in AllStrq)
        for ii in range(len(AllStrq)//2):
            if (AllStrq[2*ii] in ArmStrQ.lower()) or (AllStrq[2*ii+1] in ArmStrQ.lower()):
                temppv1=EpicsSignal('MEC:LAS:TTL:0'+str(ii+1));temppv2=EpicsSignal('MEC:LAS:FLOAT:'+str(ii+14));
                temppv1.put(1);
        # wait half of the Beckhoff-configured high-time (presumably the TTL pulse duration -- TODO confirm)
        temppvdur=EpicsSignal('MEC:EK9K1:BO5:1.HIGH');
        time.sleep(temppvdur.get()*0.5);
        # verify each trigger is still asserted and flip the corresponding notepad state PV
        for ii in range(len(AllStrq)//2):
            if (AllStrq[2*ii] in ArmStrQ.lower()) or (AllStrq[2*ii+1] in ArmStrQ.lower()):
                temppv1=EpicsSignal('MEC:LAS:TTL:0'+str(ii+1));temppv2=EpicsSignal('MEC:LAS:FLOAT:'+str(ii+14));
                if temppv1.get():
                    temppv2.put((1+temppv2.get())%2);# flip tracked state 0<->1
                else:
                    print('Warning: no shutter toggle detected; status not updated! '+AllStrq[2*ii:2*ii+2])
        if display:
            print('Finally: ',end='',flush=True)
        statuslist=cls.Status(display=display)
        return statuslist
    def Refresh():
        """
        Refreshes state tracker if shutters are touched manually or Beckhoff has problems, etc.
        Ensure all shutters are open before executing the function in order to insure that the notepad PVs reset correctly
        """
        # NOTE: implicitly static -- always called via the class, never on an instance
        pvlist=['MEC:LAS:FLOAT:'+str(ii) for ii in range(14,21)];
        inddesclist=['AB','EF','GH','IJ','WEST (ABEF)','EAST (GHIJ)','Regen']
        desclist=[inddesc+' shutter state' for inddesc in inddesclist]
        valulist=[0,0,0,0,0,0,0];# 0 == open for every shutter
        print('This will reset the shutter counter! Are you sure you want to continue? [y/n]',end='',flush=True)
        # single-keypress confirmation with a 10 s timeout
        checkprompt=efc.getch_with_TO(TOsec=10,display=False);
        if checkprompt not in ('y','Y'):
            print('Try again later then!');
            return
        else:
            print('OK, I hope you know what you\'re doing!')
        print('Resetting shutter PV statuses... Please insure all shutters are actually open!!')
        # write both the description (.DESC) and the reset value of every notepad PV
        for jj in range(len(pvlist)):
            temppv1=EpicsSignal(str(pvlist[jj]+'.DESC'));temppv2=EpicsSignal(pvlist[jj]);
            temppv1.put(desclist[jj]);temppv2.put(valulist[jj]);
class DG645:
    """
    Potential future class containing DG645-related utilities such as:
        - take a snapshot of settings to be restored later with a restore function
        - restore saved data from a snapshot function into the corresponding DG645
        - perform snapshots of multiple DG boxes at the same time, etc.
        - change labels of channels to be more descriptive
    """
    # =============================================================================
    # Sketches of future functionality, kept for reference (never enabled):
    #     def Refresh():
    #         """
    #
    #         """
    #         pvlist=[['MEC:LAS:DDG:0'+str(numii)+':'+chii+'DelaySI.DESC' for chii in ['a','c','e','g']] for numii in [1,2,6,8]];
    #         desclist=[['A:PS LATE','C:INH PS EARLY','E:unused','G: unused'],['A:PS EARLY','C:unused','E:EvoHE1','G:EvoHE2'],['A:GaiaQSW','C:GaiaLamp','E:INH UNI','G:GigE TRIG IN'],['A:BIG UNI','C:small UNI','E:INH GigE','G:unused']]
    #         for eachboxii in range(len(pvlist)):
    #             for eachentryii in range(len(eachboxii)):
    #                 temppv=EpicsSignal(pvlist[eachboxii][eachentryii])
    #                 temppv.put(desclist[eachboxii][eachentryii])
    #
    #     def Snapshot():
    #         """
    #         saves data into file that has array structure for easy unpacking
    #         """
    #         pass
    #
    #     def DGrecordall():
    #         fullstr=datetime.now().strftime('%Y%m%d_%H%M%S')+'\n'#date.today.strftime('%Y%m%d')
    #         DGnam=['SLICER','MPA1','MASTER','UP/DOWN','STREAK','MPA2','USR','UNIBLITZ']
    #         for DGno in [1,2,3,4,5,6,8]:
    #             fullstr+=DGnam[DGno-1]+'\n'
    #             for lett in ['a','b','c','d','e','f','g','h']:
    #                 head='MEC:LAS:DDG:{:0>2}:{}DelaySI'.format(DGno,lett)
    #                 tail='.DESC'
    #                 tpv1=EpicsSignal(head);tpv2=EpicsSignal(head+tail);
    #                 try:
    #                     fullstr+='{}: {}'.format(tpv2.get(),tpv1.get())+'\n'
    #                 except:
    #                     fullstr+='ERROR'+'\n'
    #         with open(str(LPL.psfilepath()+'DG/snapshot'+LPL._DateString()+'.txt'),'a') as out:
    #             out.write(fullstr)
    #
    #     def DGsaveref(Laser):
    #         if Laser.lower() == 'spl':
    #             DGnam=['SLICER','MPA1','MPA2','UNIBLITZ']
    #             DGno=[1,2,6,8]
    #         elif Laser.lower() == 'lpl':
    #             DGnam=['MASTER','UP/DOWN','STREAK']
    #             DGno=[3,4,5]
    #         else:
    #             print('Choose LPL or SPL!'); return
    #         #MEC:LAS:DDG:03:gReferenceMO
    #         #re.findall('(.+) \+ (.+)','A + 1.005e-6')
    #         return (DGnam, DGno)
    #
    #     def ChannelDescription(DGbox):
    #         """ """
    #         pass
    # =============================================================================
    pass  # placeholder: no functionality implemented yet
class SPL:#pointing, alignment, and other macros + automation routines
    """
    Potential future class containing SPL-related utilities such as:
        - pointing functions
        - alignment functions
        - other macros + automation routines
    """
    pass  # placeholder: no functionality implemented yet
class UNIBLITZ:
    """
    Potential future class containing UNIBLITZ-related utilities such as:
        - toggle system (DG boxes, etc.) triggering configurations (e.g. alignment vs SS vs burst modes, etc.)
        - toggle shutter state
        - toggle trigger state
    """
    # =============================================================================
    # Sketches of future functionality, kept for reference (never enabled):
    #     def UNIBLITZconfig(modeReq):
    #         """
    #         Configures UNIBLITZ shutter state and triggers for different MEC SPL modes:
    #
    #         modeReq=       6mm state    6mm TRIG?    65mm state    65mm TRIG?
    #         ----------------------------------------------------------
    #         'alignment'-->   OPEN         INH          OPEN          INH
    #         'blocked'  -->  CLOSED        INH         CLOSED         INH
    #         'MPA1SS'   -->  CLOSED      ENABLED        OPEN          INH
    #         'MPA1Burst'-->  CLOSED**      INH**        OPEN          INH
    #         'MPA2SS'   -->  CLOSED      ENABLED       CLOSED       ENABLED
    #         'MPA2Burst'-->  CLOSED      ENABLED        OPEN          INH
    #         """
    #         if modeReq.casefold() == 'alignment':
    #             UNI_toggle_trigger('INHall')
    #             UNI_toggle_shutter('openall')
    #         elif modeReq.casefold() == 'blocked':
    #             UNI_toggle_trigger('INHall')
    #             UNI_toggle_shutter('closeall')
    #         elif modeReq.casefold() == 'mpa1ss':
    #             UNI_toggle_trigger('INH65mm')
    #             UNI_toggle_shutter('open65mm')
    #             UNI_toggle_trigger('ENA06mm')
    #             UNI_toggle_shutter('close06mm')
    #         elif modeReq.casefold() == 'mpa1burst':
    #             UNI_toggle_trigger('INHall')
    #             UNI_toggle_shutter('open65mm')
    #             UNI_toggle_shutter('close06mm')
    #         elif modeReq.casefold() == 'mpa2ss':
    #             UNI_toggle_trigger('ENAall')
    #             UNI_toggle_shutter('closeall')
    #         elif modeReq.casefold() == 'mpa2burst':
    #             UNI_toggle_trigger('INH65mm')
    #             UNI_toggle_shutter('open65mm')
    #             UNI_toggle_trigger('ENA06mm')
    #             UNI_toggle_shutter('close06mm')
    #         else:
    #             print('Please choose a valid configuration mode!')
    #             print(UNIBLITZconfig.__doc__)
    #
    #     def UNI_toggle_shutter(ENAINHReq):
    #         """
    #         [command=open|close][shutdia=06|65|all]mm
    #         """
    #         #get a status readback capability...
    #         if shutdia=='all':
    #             templist=['06','65']
    #         else:
    #             if shutdia in ['06','65']:
    #                 templist=[shutdia]
    #             else:
    #                 print("Only accepts '06' or '65' or 'all' for shutter specification!")
    #                 return False
    #         if command=='open':
    #             polarity='POS'#get actual val
    #         elif command=='close':
    #             polarity='NEG'#get actual val
    #         else:
    #             print("Only accepts 'open' or 'close' for shutter command!")
    #             return False
    #         for eashutt in templist:
    #             wPV('PV:CH{}:Polarity'.format(chAB if eashutt=='65' else chCD), polarity)#get actual chan val and PV
    #         return
    #
    #
    #     def UNI_toggle_trigger(ENAINHReq, RepRateReq=0):
    #         """
    #         INHall, INH65mm == ENA06mm, ENBall
    #         RepRateReq
    #         all need to address UNI_TRIG_IN EVR channel, which needs to be reestablished... (stolen!)
    #         """
    #         #get a status readback capability...
    #         if 'INH':
    #             pass
    #         return
    # =============================================================================
    pass  # placeholder: no functionality implemented yet
class Spectrometer:
    """
    Potential future class containing spectrometer-related utilities such as:
        - Qmini configuration
        - Qmini readout
        - floater Ocean Optics/other spectrometers?
    """
    pass  # placeholder: no functionality implemented yet
class VISAR:
    """
    Potential future class containing VISAR-related utilities such as:
        - laser triggering/any other EPICS-connected functions (may need to be expanded)
        - streak cameras (timing or configuration with shot or whatever is useful)
    """
    pass  # placeholder: no functionality implemented yet
class CtrlSys:
    """
    Class for monitoring (and potentially controlling) laser-related control systems
    Typical usage via CtrlSys.[command]
    Possible commands include:
        :cmp_checker #checks a list of relevant computers to see if they are pinging the network
        :pv_checker #checks a list of relevant PVs for current RBV, to see if their IOCs/host are live, etc.
    Potential future improvements:
        - expand list of PVs checked in pv_checker
        - add functionality for checking current PV RBV vs historical reference or allowed range
        - consider adding functionality of ping, netconfig search, grep_pv, grep_ioc, serverStat, imgr, etc.
        - when pinging motors, check the control box itself too? (see Stage.NewportBrowserCtrl)
    """
    @staticmethod
    def _plzchkpv(inpvnam):
        """
        internal function meant for checking PV read-back values, used as part of pv_checker
        takes a single PV as input, outputs the message to be displayed as part of pv_checker
        """
        try:
            currval = efc.rPV(inpvnam, display=False)
            if currval is False:
                msgout = 'rPV fail!'
            else:
                currvalstr = str(currval)
                if len(currvalstr) > 10:
                    # truncate long read-backs so the report columns stay aligned
                    currvalstr = currvalstr[:10] + '...'
                msgout = currvalstr + ' vs oldval'
        except TimeoutError:
            msgout = 'Timeout!'
        return msgout
    @staticmethod
    def _plzchksrv(inpvnam):
        """
        internal function meant for checking server status, used as part of pv_checker
        takes a single PV as input, outputs the list of fixed-width fields to be displayed as part of pv_checker
        (internally uses grep_pv, grep_ioc, ping, and netconfig search to gather necessary information)
        """
        # PV base name is everything before the first '.' (strips field suffixes like .RBV)
        pvnam = re.findall(r'^([^.]+)', inpvnam)
        if len(pvnam) > 0:
            iocnam = re.findall(r'/reg/d/iocData/(.+)/iocInfo', os.popen('grep_pv ' + pvnam[0]).read())
        else:
            pvnam = ['Fail!']
            iocnam = []
        if len(iocnam) > 0:
            hostnam = re.findall(r"host: '([^,]+)', ", os.popen('grep_ioc ' + iocnam[0]).read())
        else:
            iocnam = ['Fail!']
            hostnam = []
        if len(hostnam) > 0:
            # os.system returns 0 when the single ping succeeds within the 2 s window
            netstat = os.system('ping -c 1 -w2 ' + hostnam[0] + ' > /dev/null 2>&1')
            locstat = re.findall(r'Location: (.+)\n', os.popen('netconfig search ' + hostnam[0]).read())
            if len(locstat) == 0:
                # netconfig produced no Location line; report failure instead of crashing on locstat[0]
                locstat = ['Fail!']
        else:
            hostnam = ['Fail!']
            netstat = -1
            locstat = ['Fail!']
        msgout = ['{:<34}'.format(pvnam[0]), '{:<26}'.format(iocnam[0]), '{:<19}'.format(hostnam[0]), '{:<25}'.format(locstat[0]), efc.cstr('{:<6}'.format(str('true ' if netstat == 0 else 'false')), str('blink,r' if netstat != 0 else ''))]
        return msgout
    @staticmethod
    def _plzchkcmp(cmpnam):
        """
        internal function meant for checking computer status as part of cmp_checker
        takes a single (hostname, shorthand) pair as input (e.g. from _MECcompylist()),
        outputs the message to be displayed as part of cmp_checker
        """
        try:
            netstat = os.system('ping -c 1 -w2 ' + cmpnam[0] + ' > /dev/null 2>&1')
            msgout = ['{:<15}'.format(cmpnam[1]) + '{:<28}'.format(cmpnam[0]) + efc.cstr('{:<6}'.format(str('true ' if netstat == 0 else 'false')), str('blink,r' if netstat != 0 else ''))]
        except Exception:
            # narrowed from a bare except so KeyboardInterrupt/SystemExit still propagate
            msgout = ['{:<15}'.format(cmpnam[1]) + '{:<28}'.format(cmpnam[0]) + efc.cstr('{:<6}'.format('fail!'), 'blink,r')]
        return msgout
    # =============================================================================
    #     @staticmethod
    #     def MECcompylist000():
    #         qqip=['172.21.46.147','172.21.46.148','172.21.46.146','172.21.46.60','172.21.46.128','172.21.46.100', '172.21.46.120','172.21.46.159', '172.21.46.197','172.21.46.142','172.21.46.70','172.21.46.78', '172.21.46.71','172.21.46.88','172.21.46.198','172.21.46.213','172.21.46.215','172.21.46.136', '172.21.46.218','172.21.46.219','172.21.46.182','172.21.46.144'];
    #         qqn=['evo1','evo2','gaia','lecroy1','lecroy2','lecroya','lecroyb','PIMikroMove','spider','spectrometer', 'tundra','topas','visar1','visar2','vitara','rga','emp','phasicslaptop','phasics1','phasics2','dacage','legend']
    #         nmlist=['mec-las-laptop06','mec-las-laptop07','mec-las-laptop05','scope-ics-mectc1-1','scope-ics-meclas-lecroy01','scope-ics-meclas-lecroy-a','scope-ics-meclas-lecroy-b','mec-las-laptop09','mec-las-laptop11','mec-las-laptop01','win-ics-mec-tundra','mec-las-laptop12','win-ics-mec-visar1','win-ics-mec-visar2','mec-las-vitara','mec-rga-laptop','scope-ics-mec-tektronix','mec-phasics-laptop01','win-ics-mec-phasics01','win-ics-mec-phasics02','mec-visar-cage','mec-las-laptop03']
    #         return list(zip(nmlist,qqip,qqn))
    # =============================================================================
    @staticmethod
    def _MECcompylist():
        """
        internal list of MEC computers to check as part of cmp_checker()
        returns (hostname, shorthand) pairs
        """
        qqn = ['mec-laser', 'mec-monitor', 'mec-daq', 'mec-control', 'mec-console',
               'vitara', 'legend', 'evo1', 'evo2', 'gaia', 'topas',
               'PIMikroMove', 'spider', 'spectrometer', 'tundra',
               'lecroy1', 'lecroy2', 'lecroya', 'lecroyb',
               'dacage', 'visar1', 'visar2', 'rga', 'emp', 'phasicslaptop', 'phasics1', 'phasics2']
        nmlist = ['mec-laser', 'mec-monitor', 'mec-daq', 'mec-control', 'mec-console',
                  'mec-las-vitara', 'mec-las-laptop03', 'mec-las-laptop06', 'mec-las-laptop07', 'mec-las-laptop05', 'mec-las-laptop12',
                  'mec-las-laptop09', 'mec-las-laptop11', 'mec-las-laptop01', 'win-ics-mec-tundra',
                  'scope-ics-mectc1-1', 'scope-ics-meclas-lecroy01', 'scope-ics-meclas-lecroy-a', 'scope-ics-meclas-lecroy-b',
                  'mec-visar-cage', 'win-ics-mec-visar1', 'win-ics-mec-visar2', 'mec-rga-laptop', 'scope-ics-mec-tektronix', 'mec-phasics-laptop01', 'win-ics-mec-phasics01', 'win-ics-mec-phasics02']
        return list(zip(nmlist, qqn))
    @classmethod
    def cmp_checker(cls):
        """
        checks a list of critical MEC computers to see if they are pinging or not, prints the status report to terminal
        (note: just because a machine is not pinging does not mean it is necessarily offline -- it may just not be responding to pings!)
        """
        qqpl = cls._MECcompylist()
        # ping all hosts in parallel; each worker returns one pre-formatted report row
        with multiprocessing.Pool() as pool:
            cmpmsg = pool.map(cls._plzchkcmp, qqpl)
        print('{:<15}'.format('Computer name') + '{:<28}'.format('IP shorthand') + '{:<6}'.format('Ping?'))
        for row in cmpmsg:
            print(''.join(row))
        return
    @classmethod
    def pv_checker(cls, pv='lpl'):
        """
        Checks a list of PVs critical for running either the MEC SPL or LPL laser systems or any user-supplied PV(s)
        If no argument is provided, it is assumed that one wants to check the PVs for the LPL (hence pv='lpl')
        Instead, if pv='spl' then the PVs for the SPL are checked instead
        Note: list of PVs to be checked for the LPL or SPL laser can be edited in the files
            GLOBAL.PSFILEPATH+'_ps_pvlist_lpl.txt' and GLOBAL.PSFILEPATH+'_ps_pvlist_spl.txt'
        The function prints a report of the PV statuses, which includes PV name, IOC name, Host name, Host location, Ping?, and PV value?
        Returns False if any of the PVs/hosts timeout or fail to ping
        Returns True if none of the PVs/hosts timeout or fail to ping
        """
        if isinstance(pv, str):
            if pv.lower() == 'lpl':
                qqpl = np.genfromtxt(GLOBAL.PSFILEPATH + '_ps_pvlist_lpl.txt', delimiter='\n', dtype=str)
            elif pv.lower() == 'spl':
                qqpl = np.genfromtxt(GLOBAL.PSFILEPATH + '_ps_pvlist_spl.txt', delimiter='\n', dtype=str)
            else:
                # any other string is treated as a single PV name
                qqpl = (pv,)
        elif isinstance(pv, (tuple, list)):
            # fixed: the original condition rejected lists (contradicting the docstring)
            # while letting arbitrary non-iterable objects through
            qqpl = pv
        else:
            print('Please input a valid PV or list of PVs to check!')
            return False
        pvmsg = []
        for eapv in qqpl:
            pvret = cls._plzchkpv(eapv)
            pvmsg.append(efc.cstr(pvret, 'BRR,BLINK' if (pvret in ('rPV fail!', 'Timeout!')) else ''))
        # server/IOC/host checks are slow (shell-outs + pings), so run them in parallel
        with multiprocessing.Pool() as pool:
            srvmsg = pool.map(cls._plzchksrv, qqpl)
        print(''.join(['{:<34}'.format('PV name'), '{:<26}'.format('IOC name'), '{:<19}'.format('Host name'), '{:<25}'.format('Host location'), '{:<6}'.format('Ping?'), '{:<15}'.format('PV value?')]))
        # fixed: the original returned from inside this loop, so only the first PV
        # row was ever printed; now all rows print and the aggregate result is returned
        allok = True
        for srvrow, pvrow in zip(srvmsg, pvmsg):
            fullmsg = ''.join(srvrow) + pvrow
            print(fullmsg)
            if any(errmsg in fullmsg for errmsg in ('Timeout', 'Fail', 'false', 'False')):
                allok = False
        return allok
class SCALLOPS:
    """
    Potential future class containing SCALLOPS-related (pulse-shaping simulation) utilities;
    see the planning notes below for the intended scope (porting existing code, transfer-function
    estimation, shot recipes, etc.)
    """
    # =============================================================================
    # potential future work
    # - port everything over from Bethany and Patin
    # - improved background subtraction needed for improved transfer function accuracy?
    # - need LPL.Deconv too?
    # - test shot routine with triangle pulse (etc.) to grab day's starting transfer function guess?
    # - fit model that makes everything most self consistent; what gets weighed most heavily?
    # - (note: _EW2 probably not useful in that case)
    # - what will recipe look like when based on SCALOPS? what will recipe contain?
    # -
    # -
    # -
    # =============================================================================
    pass  # placeholder: no functionality implemented yet
class LabEnv:
    """
    Potential future class containing lab environment-related utilities such as:
        - air/enclosure/rack temperature/humidity/etc.
        - dust/particulate levels
    """
    pass  # placeholder: no functionality implemented yet
class RIS:
    """
    Potential future class containing RIS-related utilities such as:
        - easy RBV on state, radiation sensors, etc.
    """
    pass  # placeholder: no functionality implemented yet
class PDU:
    """
    Potential future class containing PDU-related utilities such as:
        - quick and unified access/configuration/toggling of lab PDUs
    """
    pass  # placeholder: no functionality implemented yet
class GLOBAL:
"""
Class meant to serve like global variables for all other meclas classes
Idea is that only the contents of this class need to change if PV names change, calibrations or other
constants need to be updated, etc.
Typical usage via GLOBAL.[attribute] where attributes might be PVs, constants, arrays, strings, etc.
Potential future improvements:
- convert more of the parameters above into GLOBAL attributes for the sake of simplicity, clarity, longevity
- add more restoration-type functions
"""
EYFE=EpicsSignal('MEC:LAS:FLOAT:01');
ECD1w=EpicsSignal('MEC:LAS:FLOAT:02');
EAB1w=EpicsSignal('MEC:LAS:FLOAT:03');
EEF1w=EpicsSignal('MEC:LAS:FLOAT:04');
EGH1w=EpicsSignal('MEC:LAS:FLOAT:05');
EIJ1w=EpicsSignal('MEC:LAS:FLOAT:06');
EAB2w=EpicsSignal('MEC:LAS:FLOAT:07');
EEF2w=EpicsSignal('MEC:LAS:FLOAT:08');
EGH2w=EpicsSignal('MEC:LAS:FLOAT:09');
EIJ2w=EpicsSignal('MEC:LAS:FLOAT:10');
CurrExp=EpicsSignal('MEC:LAS:FLOAT:11.DESC')
CurrRun=EpicsSignal('MEC:LAS:FLOAT:11')
CurrShape=EpicsSignal('MEC:LAS:FLOAT:12.DESC');
CurrShapeLoadTime=EpicsSignal('MEC:LAS:FLOAT:12');
#=EpicsSignal('MEC:LAS:FLOAT:13');
TTLAB=EpicsSignal('MEC:LAS:FLOAT:14');
TTLEF=EpicsSignal('MEC:LAS:FLOAT:15');
TTLGH=EpicsSignal('MEC:LAS:FLOAT:16');
TTLIJ=EpicsSignal('MEC:LAS:FLOAT:17');
TTLWW=EpicsSignal('MEC:LAS:FLOAT:18');
TTLXX=EpicsSignal('MEC:LAS:FLOAT:19');
TTLREGEN=EpicsSignal('MEC:LAS:FLOAT:20');
#
EREGEN=EpicsSignal('MEC:LAS:FLOAT:21')
ETOPAS=EpicsSignal('MEC:LAS:FLOAT:22')
EMPA1=EpicsSignal('MEC:LAS:FLOAT:23')
EMPA2=EpicsSignal('MEC:LAS:FLOAT:24')
#=EpicsSignal('MEC:LAS:FLOAT:25')
#=EpicsSignal('MEC:LAS:FLOAT:26')
#=EpicsSignal('MEC:LAS:FLOAT:27')
#=EpicsSignal('MEC:LAS:FLOAT:28')
#=EpicsSignal('MEC:LAS:FLOAT:29')
#=EpicsSignal('MEC:LAS:FLOAT:30')
EcoeffYFE=EpicsSignal('MEC:LAS:FLOAT:31')
Ecoeff1in1wCD=EpicsSignal('MEC:LAS:FLOAT:32')
Ecoeff2in1wAB=EpicsSignal('MEC:LAS:FLOAT:33')
Ecoeff2in1wEF=EpicsSignal('MEC:LAS:FLOAT:34')
Ecoeff2in1wGH=EpicsSignal('MEC:LAS:FLOAT:35')
Ecoeff2in1wIJ=EpicsSignal('MEC:LAS:FLOAT:36')
Ecoeff2in2wAB=EpicsSignal('MEC:LAS:FLOAT:37')
Ecoeff2in2wEF=EpicsSignal('MEC:LAS:FLOAT:38')
Ecoeff2in2wGH=EpicsSignal('MEC:LAS:FLOAT:39')
Ecoeff2in2wIJ=EpicsSignal('MEC:LAS:FLOAT:40')
#(w/y/s1in1w/s42in1w/s42in2w/s + DateStr/today, RunNum, RunFilePath, PulseEnergies; notepadPVs: HAWG; YFE; 1w,2w,etc.; recipe better than pickle?)
#last updated 20220128
EcoeffYFEval=0.3285 #was .3578
Ecoeff1in1wCDval=0.5871
Ecoeff2in1wABval=224.0
Ecoeff2in1wEFval=177.5
Ecoeff2in1wGHval=260.9826
Ecoeff2in1wIJval= 113.2
Ecoeff2in2wABval=134.0135*0.9250
Ecoeff2in2wEFval=165.2398*0.9978
Ecoeff2in2wGHval=194.1412*1.0653
Ecoeff2in2wIJval=156.9307*0.9198
EcoeffRE1 = 1.64e5
EcoeffRE0 = 1.03156061e-01
EcoeffTO1 = 3.48e7
EcoeffTO0 = - 1.63e1
EcoeffM11 = 1.81e5
EcoeffM10 = - 0.301
EcoeffM21 = 1.05e5
EcoeffM20 = - 1.39e-1
PSNS=EpicsSignal('MEC:LAS:ARRAY:01')
SSS=EpicsSignal('MEC:LAS:ARRAY:02')
WVFMHAWG=EpicsSignal('MEC:LAS:ARRAY:03')
WVFMYFE=EpicsSignal('MEC:LAS:ARRAY:04')
WVFMYFEGOAL=EpicsSignal('MEC:LAS:ARRAY:05')
WVFM1IN1w=EpicsSignal('MEC:LAS:ARRAY:06')
WVFM2IN1w=EpicsSignal('MEC:LAS:ARRAY:07')
WVFM2IN2w=EpicsSignal('MEC:LAS:ARRAY:08')
WVFM2IN2wGOAL=EpicsSignal('MEC:LAS:ARRAY:09')
YSSS=EpicsSignal('MEC:LAS:ARRAY:10')
HWPAB=EpicsSignal(read_pv='MEC:NS1:MMS:02.RBV',write_pv='MEC:NS1:MMS:02.VAL');
HWPEF=EpicsSignal(read_pv='MEC:NS1:MMS:01.RBV',write_pv='MEC:NS1:MMS:01.VAL');
HWPGH=EpicsSignal(read_pv='MEC:LAS:MMN:30.RBV',write_pv='MEC:LAS:MMN:30.VAL');
HWPIJ=EpicsSignal(read_pv='MEC:LAS:MMN:29.RBV',write_pv='MEC:LAS:MMN:29.VAL');
HWPABoff=EpicsSignal('MEC:NS1:MMS:02.OFF');#dial offset
HWPEFoff=EpicsSignal('MEC:NS1:MMS:01.OFF');#dial offset
HWPGHoff=EpicsSignal('MEC:LAS:MMN:30.OFF');#dial offset
HWPIJoff=EpicsSignal('MEC:LAS:MMN:29.OFF');#dial offset
HWPABclr=EpicsSignal('MEC:NS1:MMS:02:SEQ_SELN');#clear start for mforce chassis
HWPEFclr=EpicsSignal('MEC:NS1:MMS:01:SEQ_SELN');#clear start for mforce chassis
EVRLPLLAMPEC=EpicsSignal(read_pv='MEC:LAS:EVR:01:TRIG7:EC_RBV',write_pv='MEC:LAS:EVR:01:TRIG7:TEC')#LPL lamp event code; needs 182
EVRLPLLAMPEN=EpicsSignal('MEC:LAS:EVR:01:TRIG7:TCTL') #LPL lamp enable;
EVRLPLSSEC=EpicsSignal(read_pv='MEC:LAS:EVR:01:TRIG8:EC_RBV', write_pv='MEC:LAS:EVR:01:TRIG8:TEC')#LPL slicer event code; needs 182 or 43 typically
EVRLPLSSEN=EpicsSignal('MEC:LAS:EVR:01:TRIG8:TCTL') #LPL slicer enable;
EGSPLRE=EpicsSignal('MEC:LAS:GENTEC:07:CH1:MEAS')
EGSPLTO=EpicsSignal('MEC:LAS:GENTEC:07:CH2:MEAS')
EGSPLM1=EpicsSignal('MEC:LAS:GENTEC:06:CH1:MEAS')
EGSPLM2=EpicsSignal('MEC:LAS:GENTEC:06:CH2:MEAS')
EGLPLYFE=EpicsSignal('MEC:LAS:LEM:03:A:CUR_DISP')
EGLPL1in1w=EpicsSignal('MEC:LAS:LEM:03:B:CUR_DISP')
EGLPL2in1wAB=EpicsSignal('MEC:LAS:GENTEC:02:CH1:MEAS')
EGLPL2in1wEF=EpicsSignal('MEC:LAS:GENTEC:02:CH2:MEAS')
EGLPL2in1wGH=EpicsSignal('MEC:LAS:GENTEC:01:CH1:MEAS')
EGLPL2in1wIJ=EpicsSignal('MEC:LAS:GENTEC:01:CH2:MEAS')
EGLPL2in2wAB=EpicsSignal('MEC:LAS:GENTEC:03:CH1:MEAS')
EGLPL2in2wEF=EpicsSignal('MEC:LAS:GENTEC:03:CH2:MEAS')
EGLPL2in2wGH=EpicsSignal('MEC:LAS:GENTEC:04:CH1:MEAS')
EGLPL2in2wIJ=EpicsSignal('MEC:LAS:GENTEC:04:CH2:MEAS')
EGLPLWest=EpicsSignal('MEC:GENTEC:01:CH2:MEAS');
EGLPLEast=EpicsSignal('MEC:GENTEC:01:CH1:MEAS');
MBCpwr=EpicsSignal('MEC:64B:PWR:2:Outlet:8:SetControlAction')#WAS 'MEC:S60:PWR:01:Outlet:7:SetControlAction'#read AND write:1=ON,2=OFF
MBCmode=EpicsSignal('MEC:LPL:MBC:01:RunningMode_RBV',write_pv='MEC:LPL:MBC:01:RunningMode')#AUTO=0,MAN=1
MBCsetpt=EpicsSignal('MEC:LPL:MBC:01:AutoCalibration.VAL',write_pv='MEC:LPL:MBC:01:AutoCalibration') #QUAD=0,MIN=1,MAX=2
MBCbias=EpicsSignal('MEC:LPL:MBC:01:BiasValue_RBV',write_pv='MEC:LPL:MBC:01:BiasValue')
MBCfault=EpicsSignal('MEC:LPL:MBC:01:ErrorStatus',write_pv='MEC:LPL:MBC:01:ClearErrors')
SHGABmot=EpicsSignal(read_pv='MEC:LAS:MMN:22.RBV',write_pv='MEC:LAS:MMN:22.VAL')
SHGEFmot=EpicsSignal(read_pv='MEC:LAS:MMN:24.RBV',write_pv='MEC:LAS:MMN:24.VAL')
SHGGHmot=EpicsSignal(read_pv='MEC:LAS:MMN:17.RBV',write_pv='MEC:LAS:MMN:17.VAL')
SHGIJmot=EpicsSignal(read_pv='MEC:LAS:MMN:18.RBV',write_pv='MEC:LAS:MMN:18.VAL')
PFNSS=EpicsSignal('MEC:PFN:SINCESHOT')
PFNmode=EpicsSignal('MEC:PFN:MODE')
PFNCDEN=EpicsSignal(read_pv='MEC:PFN:CH0:ENABLE_RBV',write_pv='MEC:PFN:CH0:ENABLE');
PFNCDCS=EpicsSignal('MEC:PFN:CH0:CHARGE_STATE');
PFNAEN=EpicsSignal(read_pv='MEC:PFN:CH1:ENABLE_RBV',write_pv='MEC:PFN:CH1:ENABLE');
PFNACS=EpicsSignal('MEC:PFN:CH1:CHARGE_STATE');
PFNBEN=EpicsSignal(read_pv='MEC:PFN:CH2:ENABLE_RBV',write_pv='MEC:PFN:CH2:ENABLE');
PFNBCS=EpicsSignal('MEC:PFN:CH2:CHARGE_STATE');
PFNEEN=EpicsSignal(read_pv='MEC:PFN:CH3:ENABLE_RBV',write_pv='MEC:PFN:CH3:ENABLE');
PFNECS=EpicsSignal('MEC:PFN:CH3:CHARGE_STATE');
PFNFEN=EpicsSignal(read_pv='MEC:PFN:CH4:ENABLE_RBV',write_pv='MEC:PFN:CH4:ENABLE');
PFNFCS=EpicsSignal('MEC:PFN:CH4:CHARGE_STATE');
PFNGEN=EpicsSignal(read_pv='MEC:PFN:CH5:ENABLE_RBV',write_pv='MEC:PFN:CH5:ENABLE');
PFNGCS=EpicsSignal('MEC:PFN:CH5:CHARGE_STATE');
PFNHEN=EpicsSignal(read_pv='MEC:PFN:CH6:ENABLE_RBV',write_pv='MEC:PFN:CH6:ENABLE');
PFNHCS=EpicsSignal('MEC:PFN:CH6:CHARGE_STATE');
PFNIEN=EpicsSignal(read_pv='MEC:PFN:CH7:ENABLE_RBV',write_pv='MEC:PFN:CH7:ENABLE');
PFNICS=EpicsSignal('MEC:PFN:CH7:CHARGE_STATE');
PFNJEN=EpicsSignal(read_pv='MEC:PFN:CH8:ENABLE_RBV',write_pv='MEC:PFN:CH8:ENABLE');
PFNJCS=EpicsSignal('MEC:PFN:CH8:CHARGE_STATE');
LPLPCpwr=EpicsSignal('MEC:S60:PWR:01:Outlet:6:SetControlAction')
LPLVACpwr=EpicsSignal('MEC:S60:PWR:01:Outlet:7:SetControlAction')
LPLPS1pwr=EpicsSignal('MEC:S60:PWR:01:Outlet:1:SetControlAction')
LPLHAWGpwr=EpicsSignal('MEC:64B:PWR:2:Outlet:1:SetControlAction')
XPS3pwr=EpicsSignal('MEC:64A:PWR:2:Outlet:5:SetControlAction')
XPS4pwr=EpicsSignal('MEC:64B:PWR:1:Outlet:1:SetControlAction')
XPS1IALL=EpicsSignal('MEC:LAS:MMN_0108.IALL')
XPS1RALL=EpicsSignal('MEC:LAS:MMN_0108.RALL')
XPS2IALL=EpicsSignal('MEC:LAS:MMN_0916.IALL')
XPS2RALL=EpicsSignal('MEC:LAS:MMN_0916.RALL')
XPS3IALL=EpicsSignal('MEC:LAS:MMN_1724.IALL')
XPS3RALL=EpicsSignal('MEC:LAS:MMN_1724.RALL')
XPS4IALL=EpicsSignal('MEC:LAS:MMN_2532.IALL')
XPS4RALL=EpicsSignal('MEC:LAS:MMN_2532.RALL')
OKHOSTS=['mec-monitor', 'mec-daq', 'mec-laser']
OKUSERS=['mecopr']
LMapAB=[5,100] #Pixel mapping from LeCroyA and LeCroyB horizontal axis (1002px) to Highland (140px)
LMap2=[50,1000] #Pixel mapping from LeCroy2 horizontal axis (10002px) to Highland (140px)
pwttfmap=[25,500]
PSFILEPATH='/reg/neh/operator/mecopr/mecpython/pulseshaping/'
HIGHLAND_IP = 'highland-mec-01'
LECROY_A_IP = '172.21.46.120' ###TEMP SUBSTITUTE### #'172.21.46.100'#'scope-ics-meclas-lecroy-a'
LECROY_B_IP = '172.21.46.120'#'scope-ics-meclas-lecroy-b'#
LECROY_1_IP = '172.21.46.60'#'scope-ics-mectc1-1'
LECROY_2_IP = '172.21.46.128'#'scope-ics-meclas-lecroy-02'
LECROY_L_IP = '172.21.160.252'#'scope-ics-meclas-lecroy-02'
@classmethod
def notepadPVreset(cls):
efc.wPV('MEC:LAS:FLOAT:01.DESC', 'E_synth_YFE');
efc.wPV('MEC:LAS:FLOAT:02.DESC', 'E_synth_CD1w');
efc.wPV('MEC:LAS:FLOAT:03.DESC', 'E_synth_AB1w');
efc.wPV('MEC:LAS:FLOAT:04.DESC', 'E_synth_EF1w');
efc.wPV('MEC:LAS:FLOAT:05.DESC', 'E_synth_GH1w');
efc.wPV('MEC:LAS:FLOAT:06.DESC', 'E_synth_IJ1w');
efc.wPV('MEC:LAS:FLOAT:07.DESC', 'E_synth_AB2w');
efc.wPV('MEC:LAS:FLOAT:08.DESC', 'E_synth_EF2w');
efc.wPV('MEC:LAS:FLOAT:09.DESC', 'E_synth_GH2w');
efc.wPV('MEC:LAS:FLOAT:10.DESC', 'E_synth_IJ2w');
#11: DESC is CurrExp
#12: DESC is CurrShape
efc.wPV('MEC:LAS:FLOAT:13.DESC', 'reserved');
efc.wPV('MEC:LAS:FLOAT:14.DESC', 'AB shutter state');
efc.wPV('MEC:LAS:FLOAT:15.DESC', 'EF shutter state');
efc.wPV('MEC:LAS:FLOAT:16.DESC', 'GH shutter state');
efc.wPV('MEC:LAS:FLOAT:17.DESC', 'IJ shutter state');
efc.wPV('MEC:LAS:FLOAT:18.DESC', 'WEST (ABEF) shutter state');
efc.wPV('MEC:LAS:FLOAT:19.DESC', 'EAST (GHIJ)shutter state');
efc.wPV('MEC:LAS:FLOAT:20.DESC', 'Regen shutter state');
#
efc.wPV('MEC:LAS:FLOAT:21.DESC', 'E_synth_regen');
efc.wPV('MEC:LAS:FLOAT:22.DESC', 'E_synth_TOPAS');
efc.wPV('MEC:LAS:FLOAT:23.DESC', 'E_synth_MPA1');
efc.wPV('MEC:LAS:FLOAT:24.DESC', 'E_synth_MPA2');
efc.wPV('MEC:LAS:FLOAT:25.DESC', 'reserved');
efc.wPV('MEC:LAS:FLOAT:26.DESC', 'reserved');
efc.wPV('MEC:LAS:FLOAT:27.DESC', 'reserved');
efc.wPV('MEC:LAS:FLOAT:28.DESC', 'reserved');
efc.wPV('MEC:LAS:FLOAT:29.DESC', 'reserved');
efc.wPV('MEC:LAS:FLOAT:30.DESC', 'reserved');
efc.wPV('MEC:LAS:FLOAT:31.DESC', 'E_coeff_YFE');
efc.wPV('MEC:LAS:FLOAT:32.DESC', 'E_coeff_CD1w');
efc.wPV('MEC:LAS:FLOAT:33.DESC', 'E_coeff_AB1w');
efc.wPV('MEC:LAS:FLOAT:34.DESC', 'E_coeff_EF1w');
efc.wPV('MEC:LAS:FLOAT:35.DESC', 'E_coeff_GH1w');
efc.wPV('MEC:LAS:FLOAT:36.DESC', 'E_coeff_IJ1w');
efc.wPV('MEC:LAS:FLOAT:37.DESC', 'E_coeff_AB2w');
efc.wPV('MEC:LAS:FLOAT:38.DESC', 'E_coeff_EF2w');
efc.wPV('MEC:LAS:FLOAT:39.DESC', 'E_coeff_GH2w');
efc.wPV('MEC:LAS:FLOAT:40.DESC', 'E_coeff_IJ2w');
efc.wPV('MEC:LAS:FLOAT:31', cls.EcoeffYFEval);
efc.wPV('MEC:LAS:FLOAT:32', cls.Ecoeff1in1wCDval);
efc.wPV('MEC:LAS:FLOAT:33', cls.Ecoeff2in1wABval);
efc.wPV('MEC:LAS:FLOAT:34', cls.Ecoeff2in1wEFval);
efc.wPV('MEC:LAS:FLOAT:35', cls.Ecoeff2in1wGHval);
efc.wPV('MEC:LAS:FLOAT:36', cls.Ecoeff2in1wIJval);
efc.wPV('MEC:LAS:FLOAT:37', cls.Ecoeff2in2wABval);
efc.wPV('MEC:LAS:FLOAT:38', cls.Ecoeff2in2wEFval);
efc.wPV('MEC:LAS:FLOAT:39', cls.Ecoeff2in2wGHval);
efc.wPV('MEC:LAS:FLOAT:40', cls.Ecoeff2in2wIJval);
efc.wPV('MEC:LAS:ARRAY:01.DESC', 'Psns pulse segment lengths:10')
efc.wPV('MEC:LAS:ARRAY:02.DESC', 'SSs pulse segment endpoint pairs:20')
efc.wPV('MEC:LAS:ARRAY:03.DESC', 'Highland Grafana:140')
efc.wPV('MEC:LAS:ARRAY:04.DESC', 'YFE Grafana:140')
efc.wPV('MEC:LAS:ARRAY:05.DESC', 'YFEgoal Grafana:140')
efc.wPV('MEC:LAS:ARRAY:06.DESC', '1in1w Grafana:140')
efc.wPV('MEC:LAS:ARRAY:07.DESC', '2in1w Grafana:140')
efc.wPV('MEC:LAS:ARRAY:08.DESC', '2in2w Grafana:140')
efc.wPV('MEC:LAS:ARRAY:09.DESC', '2in2wgoal Grafana:140')
efc.wPV('MEC:LAS:ARRAY:10.DESC', 'YSSs pulse segment endpoint pairs:20(1000)')
efc.wPV('MEC:LAS:ARRAY:11.DESC', 'Spare Grafana:1000')
efc.wPV('MEC:LAS:ARRAY:12.DESC', 'Spare Grafana:1000')
efc.wPV('MEC:LAS:ARRAY:13.DESC', 'Spare Grafana:1000')
efc.wPV('MEC:LAS:ARRAY:14.DESC', 'Spare Grafana:1000')
| efcunn/mecps | meclas.py | meclas.py | py | 349,079 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"line_numbe... |
27490420908 | import datetime
from FileBlackHolePy import FileBlackHole, initLib, destroyLib
from MigurdiaPy import Migurdia
from json import dumps, loads
from colors import log, bcolors
from PIL import Image
from credentials import __USERNAME__, __PASSWORD__, __TOKEN__
from os.path import getsize, isfile, isdir
from pixivpy_async import AppPixivAPI
from os import remove, rename, mkdir
from shutil import copyfile
from random import randint
import aiohttp
import asyncio
tempFolder = f"./temp-{datetime.datetime.now()}"
migurdiaSessionID = ""
async def isValidImageFile(path):
    """Return True if *path* can be parsed as an image by Pillow.

    Uses ``Image.verify()``, which checks file integrity without decoding
    the full pixel data.
    """
    try:
        Image.open(path).verify()
    except Exception:
        # Pillow raises a variety of errors (UnidentifiedImageError, OSError,
        # ...) for broken files; the original bare ``except`` also swallowed
        # KeyboardInterrupt/SystemExit, so catch Exception only.
        return False
    return True
async def getThumbnail(path):
    """Return a path to a small JPEG preview of the image at *path*.

    Returns *path* itself when the file is already under 512 KB, a new
    ``*.thumbnail.<ext>`` path otherwise, or None when the file is not a
    valid image.
    """
    if not await isValidImageFile(path):
        # Keep a copy of the broken download (suffix "bc") for inspection.
        copyfile(path, path + "bc")
        log(f"[!] File {path} is not a valid image.", [bcolors.FAIL])
        return None
    image = Image.open(path)
    pixelArea = image.size[0] * image.size[1]
    aspectRatio = image.size[0] / image.size[1]
    # return path as it is small itself
    if getsize(path) < 512000: return path
    # begin creating thumbnail by creating path for it
    path = path.split('.')
    path = '.'.join( path[:-1] + ["thumbnail", path[-1]] )
    # convert to jpeg
    image = image.convert('RGB')
    # if res is small, save it as jpeg and return
    if pixelArea <= (512 * 512):
        image.save( path )
        return path
    # calculate new size for image (keeping aspect ratio)
    newWidth = ((512 * 512) * aspectRatio) ** 0.5
    newHeight = (512 * 512) / newWidth
    size = (int(newWidth), int(newHeight))
    image = image.resize(size)
    image.save(path)
    return path
class PixivScraper(Migurdia):
    """Scrapes Pixiv illustrations and mirrors them as Migurdia posts."""

    downloader = None   # shared aiohttp session for raw image downloads
    pixivApp = None     # authenticated Pixiv API client

    def __init__(self):
        super().__init__()
        # Pixiv rate-limits aggressively; keep at most 4 parallel downloads.
        connector = aiohttp.TCPConnector(limit=4)
        self.downloader = aiohttp.ClientSession(connector=connector)
        self.pixivApp = AppPixivAPI()

    async def quit(self):
        """Release the aiohttp download session."""
        await self.downloader.close()

    async def login(self, migurdiaUsername, migurdiaPassword, pixivRefreshToken):
        """Authenticate against Pixiv (refresh token) and Migurdia (credentials)."""
        await self.pixivApp.login(refresh_token=pixivRefreshToken)
        await super().login(migurdiaUsername, migurdiaPassword)

    async def downloadFile(self, url, tries=6):
        """Download *url* into the temp folder, retrying up to *tries* times.

        Returns the local file path, or None when every attempt failed.
        """
        if tries == 0: return None
        localFilename = f"{tempFolder}/{url.split('/')[-1]}"
        if isfile(localFilename): remove(localFilename)
        # Pixiv serves images only with a pixiv.net referer.
        hs = {
            'referer'      : 'https://www.pixiv.net/',
            'cache-control': 'no-cache',
            'pragma'       : 'no-cache'
        }
        try:
            async with self.downloader.get(url, headers=hs) as response:
                with open(localFilename, 'wb+') as file:
                    file.write( await response.read() )
            if not await isValidImageFile(localFilename):
                remove(localFilename)
                raise Exception("Downloaded file is invalid.")
        except Exception:
            # BUG FIX: the original used ``random.randint`` although only
            # ``randint`` is imported at module level (``from random import
            # randint``), raising NameError on every retry path.
            x = randint(5, 100)
            log(f"[!] Failed to download file ({tries} tries left). Waiting {x} seconds.", [bcolors.WARNING])
            await asyncio.sleep(x)
            return await self.downloadFile(url, tries - 1)
        return localFilename

    async def addPixivFile(self, fb, fileUrl, title, desc, tags):
        """Download one image, upload its thumbnail, and create a post.

        Returns the created post data, or None on any failure.
        """
        path = await self.downloadFile(fileUrl)
        if path is None:
            log(f"[!] Failed to download file {fileUrl}.", [bcolors.FAIL])
            return None
        thumbnailPath = await getThumbnail(path)
        # When a separate thumbnail was produced the original is not needed.
        if thumbnailPath != path: remove(path)
        if thumbnailPath is None:
            log(f"[!] Failed to create thumbnail of {path}.", [bcolors.FAIL])
            return None
        thumbnailCode = await fb.uploadFile(thumbnailPath)
        remove(thumbnailPath)
        if thumbnailCode is None or thumbnailCode['exitCode'] != 0:
            log(f"[!] Failed to upload thumbnail of {path}.", [bcolors.FAIL])
            return None
        result = await super().addPost(
            fileUrl,
            f"https://fileblackhole.000webhostapp.com/files/{thumbnailCode['result']}",
            tags,
            title,
            desc
        )
        if result is None:
            # BUG FIX: the original logged this and then fell through to index
            # into ``result`` (None), raising TypeError.
            log(f"[!] Failed to process file {path}", [bcolors.FAIL])
            return None
        if result['exitCode'] != 0:
            # BUG FIX: the original fell through and indexed a failed response.
            log(f"[!] Failed to add post for {path}. (CODE: {result['exitCode']})", [bcolors.FAIL])
            return None
        return result['result'][0]['result']

    async def addPixivPost(self, fb, post):
        """Mirror one Pixiv illustration (all of its pages) as posts."""
        log(f"[*] Processing post (ID: {post.id}).", [bcolors.OKCYAN])
        urls = []
        # Multi-page illustrations list their originals in meta_pages;
        # single-page ones in meta_single_page.
        if post.page_count > 1: urls = [ post.image_urls.original for post in post.meta_pages ]
        else: urls = [ post.meta_single_page.original_image_url ]
        tags = [ tag.name for tag in post.tags ]
        tags.append(post.user.name)
        tasks = []
        for url in urls:
            tasks.append( self.addPixivFile(fb, url, post.title, post.caption, tags) )
        result = await asyncio.gather(*tasks)
        log(f"[*] Successfully processed post id {post.id}", [bcolors.OKGREEN])
        # Record the result set on disk when any page failed, for later retry.
        for r in result:
            if r is None:
                with open(f"{tempFolder}/{post.id}", 'w+') as f:
                    f.write( dumps(result) )

    async def scrapePixivAuthor(self, authorID):
        """Walk every illustration page of *authorID* and mirror each post."""
        log(f"[*] Scraping author id {authorID}", [bcolors.OKCYAN])
        fileBlackHole = FileBlackHole()
        await fileBlackHole.createSession()
        tasksPA = []
        next_qs = { 'user_id': authorID }
        while True:
            result = await self.pixivApp.user_illusts(**next_qs)
            if result.illusts is None: break
            for illust in result.illusts:
                tasksPA.append( self.addPixivPost(fileBlackHole, illust) )
            # parse_qs yields the query for the next page, or None at the end.
            next_qs = self.pixivApp.parse_qs(result.next_url)
            if next_qs is None: break
        await asyncio.gather(*tasksPA)
        await fileBlackHole.close()
        log(f"[*] Successfully scraped author id {authorID}", [bcolors.OKGREEN])
# Ensure the per-run temp download folder exists before any task uses it.
if not isdir(tempFolder):
    mkdir(tempFolder)
async def main():
    """Drive the scraper over every author listed in final.json.

    Authors are processed in batches of three concurrent scrape tasks.
    """
    with open("final.json") as f:
        authors = loads(f.read())
    await initLib()
    scraper = PixivScraper()
    await scraper.login(__USERNAME__, __PASSWORD__, __TOKEN__)
    tasksA = []
    for i in range(len(authors)):
        # Await the previous batch before starting the next one.
        if i % 3 == 0:
            await asyncio.gather(*tasksA)
            tasksA = []
        tasksA.append( scraper.scrapePixivAuthor(int(authors[i])) )
    # BUG FIX: the final (partial) batch was never awaited, so up to two
    # authors were silently skipped and their coroutines never ran.
    await asyncio.gather(*tasksA)
    await scraper.quit()
    await destroyLib()
if __name__ == "__main__":
asyncio.run(main())
| iLikeTrioxin/PTMU | pixivToMigurdiaUploader.py | pixivToMigurdiaUploader.py | py | 6,744 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PIL.Image... |
21619772911 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import apache_beam as beam
from apache_beam.runners.direct import direct_runner
from apache_beam.runners.interactive import cache_manager as cache
from apache_beam.runners.interactive import pipeline_analyzer
def to_stable_runner_api(p):
    """The extra round trip ensures a stable pipeline proto.

    Serializing the pipeline to proto, reconstructing it, and serializing
    again normalizes auto-generated ids so two protos can be compared.
    """
    return (
        beam.pipeline.Pipeline.from_runner_api(
            p.to_runner_api(use_fake_coders=True), p.runner,
            p._options).to_runner_api(use_fake_coders=True))
class PipelineAnalyzerTest(unittest.TestCase):
    """Tests for PipelineAnalyzer's cache read/write instrumentation."""

    def setUp(self):
        self.runner = direct_runner.DirectRunner()
        self.cache_manager = cache.FileBasedCacheManager()

    def tearDown(self):
        self.cache_manager.cleanup()

    def assertPipelineEqual(self, pipeline_proto1, pipeline_proto2):
        """A naive check for Pipeline proto equality.

        Compares component counts and the root transform only; transform
        names and payloads are deliberately ignored.
        """
        components1 = pipeline_proto1.components
        components2 = pipeline_proto2.components
        self.assertEqual(len(components1.transforms), len(components2.transforms))
        self.assertEqual(
            len(components1.pcollections), len(components2.pcollections))

        # GreatEqual instead of Equal because the pipeline_proto_to_execute could
        # include more windowing_stratagies and coders than necessary.
        self.assertGreaterEqual(
            len(components1.windowing_strategies),
            len(components2.windowing_strategies))
        self.assertGreaterEqual(len(components1.coders), len(components2.coders))
        self.assertTransformEqual(
            pipeline_proto1,
            pipeline_proto1.root_transform_ids[0],
            pipeline_proto2,
            pipeline_proto2.root_transform_ids[0])

    def assertTransformEqual(
        self, pipeline_proto1, transform_id1, pipeline_proto2, transform_id2):
        """A naive check for Transform proto equality.
        """
        transform_proto1 = pipeline_proto1.components.transforms[transform_id1]
        transform_proto2 = pipeline_proto2.components.transforms[transform_id2]
        self.assertEqual(transform_proto1.spec.urn, transform_proto2.spec.urn)
        # Skipping payload checking because PTransforms of the same functionality
        # could generate different payloads.
        self.assertEqual(
            len(transform_proto1.subtransforms),
            len(transform_proto2.subtransforms))
        self.assertSetEqual(
            set(transform_proto1.inputs), set(transform_proto2.inputs))
        self.assertSetEqual(
            set(transform_proto1.outputs), set(transform_proto2.outputs))

    def test_basic(self):
        """Cold run writes all caches; rerun reads one cache and writes fewer."""
        p = beam.Pipeline(runner=self.runner)

        # The cold run.
        pcoll = (
            p
            | 'Create' >> beam.Create([1, 2, 3])
            | 'Double' >> beam.Map(lambda x: x * 2)
            | 'Square' >> beam.Map(lambda x: x**2))
        analyzer = pipeline_analyzer.PipelineAnalyzer(
            self.cache_manager, to_stable_runner_api(p), self.runner)
        pipeline_to_execute = beam.pipeline.Pipeline.from_runner_api(
            analyzer.pipeline_proto_to_execute(), p.runner, p._options)
        pipeline_to_execute.run().wait_until_finish()
        self.assertEqual(
            len(analyzer.tl_required_trans_ids()),
            7  # Create, Double, Square, CacheSample * 3, CacheFull
        )
        self.assertEqual(len(analyzer.tl_referenced_pcoll_ids()), 3)
        self.assertEqual(len(analyzer.read_cache_ids()), 0)
        self.assertEqual(len(analyzer.write_cache_ids()), 4)

        # The second run.
        _ = (
            pcoll
            | 'Triple' >> beam.Map(lambda x: x * 3)
            | 'Cube' >> beam.Map(lambda x: x**3))
        analyzer = pipeline_analyzer.PipelineAnalyzer(
            self.cache_manager, to_stable_runner_api(p), self.runner)
        pipeline_to_execute = beam.pipeline.Pipeline.from_runner_api(
            analyzer.pipeline_proto_to_execute(), p.runner, p._options)
        self.assertEqual(
            len(analyzer.tl_required_trans_ids()),
            6  # Read, Triple, Cube, CacheSample * 2, CacheFull
        )
        self.assertEqual(len(analyzer.tl_referenced_pcoll_ids()), 3)
        self.assertEqual(len(analyzer.read_cache_ids()), 1)
        self.assertEqual(len(analyzer.write_cache_ids()), 3)

        # No need to actually execute the second run.

    def test_word_count(self):
        """Analyzer output matches a hand-instrumented word-count pipeline."""
        p = beam.Pipeline(runner=self.runner)

        class WordExtractingDoFn(beam.DoFn):

            def process(self, element):
                text_line = element.strip()
                words = text_line.split()
                return words

        # Count the occurrences of each word.
        pcoll1 = p | beam.Create(['to be or not to be that is the question'])
        pcoll2 = pcoll1 | 'Split' >> beam.ParDo(WordExtractingDoFn())
        pcoll3 = pcoll2 | 'Pair with One' >> beam.Map(lambda x: (x, 1))
        pcoll4 = pcoll3 | 'Group' >> beam.GroupByKey()
        pcoll5 = pcoll4 | 'Count' >> beam.Map(lambda item: (item[0], sum(item[1])))

        analyzer = pipeline_analyzer.PipelineAnalyzer(
            self.cache_manager, to_stable_runner_api(p), self.runner)

        cache_label1 = 'PColl-1111111'
        cache_label2 = 'PColl-2222222'
        cache_label3 = 'PColl-3333333'
        cache_label4 = 'PColl-4444444'
        cache_label5 = 'PColl-5555555'

        # pylint: disable=expression-not-assigned
        pcoll1 | 'CacheSample%s' % cache_label1 >> cache.WriteCache(
            self.cache_manager, cache_label1, sample=True, sample_size=10)
        pcoll2 | 'CacheSample%s' % cache_label2 >> cache.WriteCache(
            self.cache_manager, cache_label2, sample=True, sample_size=10)
        pcoll3 | 'CacheSample%s' % cache_label3 >> cache.WriteCache(
            self.cache_manager, cache_label3, sample=True, sample_size=10)
        # BUG FIX: cache_label4/cache_label5 were defined (and used in the
        # transform names above/below) but the WriteCache arguments reused
        # cache_label3, writing three caches to the same label. The naive
        # pipeline comparison ignores labels, so the checked proto dimensions
        # are unchanged.
        pcoll4 | 'CacheSample%s' % cache_label4 >> cache.WriteCache(
            self.cache_manager, cache_label4, sample=True, sample_size=10)
        pcoll5 | 'CacheSample%s' % cache_label5 >> cache.WriteCache(
            self.cache_manager, cache_label5, sample=True, sample_size=10)
        pcoll5 | 'CacheFull%s' % cache_label5 >> cache.WriteCache(
            self.cache_manager, cache_label5)
        expected_pipeline_proto = to_stable_runner_api(p)

        self.assertPipelineEqual(
            analyzer.pipeline_proto_to_execute(), expected_pipeline_proto)

        pipeline_to_execute = beam.pipeline.Pipeline.from_runner_api(
            analyzer.pipeline_proto_to_execute(), p.runner, p._options)
        pipeline_to_execute.run().wait_until_finish()

    def test_write_cache_expansion(self):
        """Every intermediate PCollection gets sample caches plus a full cache."""
        p = beam.Pipeline(runner=self.runner)

        pcoll1 = p | 'Create' >> beam.Create([1, 2, 3])
        pcoll2 = pcoll1 | 'Double' >> beam.Map(lambda x: x * 2)
        pcoll3 = pcoll2 | 'Square' >> beam.Map(lambda x: x**2)
        analyzer = pipeline_analyzer.PipelineAnalyzer(
            self.cache_manager, to_stable_runner_api(p), self.runner)

        cache_label1 = 'PColl-1234567'
        cache_label2 = 'PColl-7654321'
        cache_label3 = 'PColl-3141593'

        # pylint: disable=expression-not-assigned
        pcoll1 | 'CacheSample%s' % cache_label1 >> cache.WriteCache(
            self.cache_manager, cache_label1, sample=True, sample_size=10)
        pcoll2 | 'CacheSample%s' % cache_label2 >> cache.WriteCache(
            self.cache_manager, cache_label2, sample=True, sample_size=10)
        pcoll3 | 'CacheSample%s' % cache_label3 >> cache.WriteCache(
            self.cache_manager, cache_label3, sample=True, sample_size=10)
        pcoll3 | 'CacheFull%s' % cache_label3 >> cache.WriteCache(
            self.cache_manager, cache_label3)
        expected_pipeline_proto = to_stable_runner_api(p)

        # Since WriteCache expansion leads to more than 50 PTransform protos in the
        # pipeline, a simple check of proto map size is enough.
        self.assertPipelineEqual(
            analyzer.pipeline_proto_to_execute(), expected_pipeline_proto)

    def test_read_cache_expansion(self):
        """A rerun replaces the cached prefix with a ReadCache source."""
        p = beam.Pipeline(runner=self.runner)

        # The cold run.
        pcoll = (
            p
            | 'Create' >> beam.Create([1, 2, 3])
            | 'Double' >> beam.Map(lambda x: x * 2)
            | 'Square' >> beam.Map(lambda x: x**2))
        pipeline_proto = to_stable_runner_api(p)

        pipeline_info = pipeline_analyzer.PipelineInfo(pipeline_proto.components)
        pcoll_id = 'ref_PCollection_PCollection_12'  # Output PCollection of Square
        cache_label1 = pipeline_info.cache_label(pcoll_id)

        analyzer = pipeline_analyzer.PipelineAnalyzer(
            self.cache_manager, pipeline_proto, self.runner)
        pipeline_to_execute = beam.pipeline.Pipeline.from_runner_api(
            analyzer.pipeline_proto_to_execute(), p.runner, p._options)
        pipeline_to_execute.run().wait_until_finish()

        # The second run.
        _ = (
            pcoll
            | 'Triple' >> beam.Map(lambda x: x * 3)
            | 'Cube' >> beam.Map(lambda x: x**3))
        analyzer = pipeline_analyzer.PipelineAnalyzer(
            self.cache_manager, to_stable_runner_api(p), self.runner)

        expected_pipeline = beam.Pipeline(runner=self.runner)
        pcoll1 = (
            expected_pipeline
            | 'Load%s' % cache_label1 >> cache.ReadCache(
                self.cache_manager, cache_label1))
        pcoll2 = pcoll1 | 'Triple' >> beam.Map(lambda x: x * 3)
        pcoll3 = pcoll2 | 'Cube' >> beam.Map(lambda x: x**3)

        cache_label2 = 'PColl-7654321'
        cache_label3 = 'PColl-3141593'

        # pylint: disable=expression-not-assigned
        pcoll2 | 'CacheSample%s' % cache_label2 >> cache.WriteCache(
            self.cache_manager, cache_label2, sample=True, sample_size=10)
        pcoll3 | 'CacheSample%s' % cache_label3 >> cache.WriteCache(
            self.cache_manager, cache_label3, sample=True, sample_size=10)
        pcoll3 | 'CacheFull%s' % cache_label3 >> cache.WriteCache(
            self.cache_manager, cache_label3)

        # Since ReadCache & WriteCache expansion leads to more than 50 PTransform
        # protos in the pipeline, a simple check of proto map size is enough.
        self.assertPipelineEqual(
            analyzer.pipeline_proto_to_execute(),
            to_stable_runner_api(expected_pipeline))
class PipelineInfoTest(unittest.TestCase):
    """Tests for pipeline_analyzer.PipelineInfo."""

    def setUp(self):
        self.runner = direct_runner.DirectRunner()

    def test_passthrough(self):
        """
        Test that PTransforms which pass through their input PCollection can be
        used with PipelineInfo.
        """
        class Passthrough(beam.PTransform):

            def expand(self, pcoll):
                # Returns its input unchanged: the composite produces no new
                # PCollection of its own.
                return pcoll

        p = beam.Pipeline(runner=self.runner)
        p | beam.Impulse() | Passthrough()  # pylint: disable=expression-not-assigned
        proto = to_stable_runner_api(p).components
        info = pipeline_analyzer.PipelineInfo(proto)
        for pcoll_id in info.all_pcollections():
            # FIXME: If PipelineInfo does not support passthrough PTransforms, this
            # will only fail some of the time, depending on the ordering of
            # transforms in the Pipeline proto.

            # Should not throw exception
            info.cache_label(pcoll_id)
if __name__ == '__main__':
unittest.main()
| a0x8o/kafka | sdks/python/apache_beam/runners/interactive/pipeline_analyzer_test.py | pipeline_analyzer_test.py | py | 10,712 | python | en | code | 59 | github-code | 36 | [
{
"api_name": "apache_beam.pipeline.Pipeline.from_runner_api",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "apache_beam.pipeline",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 22,
"usage_type": "attribute"
... |
27898714417 | from datetime import datetime
def foo(n):
    """Return the first *n* prime numbers.

    *n* may be a string (e.g. raw ``input()``); it is coerced with ``int``.
    Side effect: sets the module-level global ``start_time`` to the moment
    the computation began (the caller uses it to report elapsed time).
    """
    n = int(n)
    global start_time
    start_time = datetime.now()
    primes = []  # renamed from ``list`` to avoid shadowing the builtin
    candidate = 2
    while len(primes) < n:
        # Trial division up to sqrt(candidate); the original tested every
        # divisor up to the candidate itself with no early exit.
        if all(candidate % d for d in range(2, int(candidate ** 0.5) + 1)):
            primes.append(candidate)
        candidate += 1
    return primes
print(foo(n=input('Write: ')))
print(datetime.now() - start_time)
| stgolovin/js_hw | lesson2/task1.py | task1.py | py | 556 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.... |
8478439649 | #Python imports
import json
from uuid import UUID
from datetime import date
from datetime import datetime
from typing import Optional, List
#Pydantic imports
from pydantic import Field as FD
from pydantic import BaseModel as BMW
from pydantic import EmailStr
#FastAPI imports
from fastapi import FastAPI
from fastapi import status, HTTPException
from fastapi import Body, Path, Form
#This is the app
app = FastAPI()
#Those are the models to use
class UserBase(BMW):
    # Minimal identity shared by every user-facing model: id + contact email.
    user_id : UUID = FD(
        ...,
    )
    email: EmailStr = FD(
        ...,
        example=""
    )
class UserLogin(UserBase):
    # Credentials model: UserBase fields plus the password (8-22 chars).
    password: str = FD(
        ...,
        min_length=8,
        max_length=22,
        example="" ,
    )
class User(UserBase):
    # Public profile: handle plus real names; birth date is optional.
    user_name: str = FD(
        ...,
        min_length=5,
        max_length=15,
        example="@cordellatt"
    )
    first_name : str = FD(
        ...,
        min_length=1,
        max_length=20,
        example="",
    )
    last_name : str = FD(
        ...,
        min_length=1,
        max_length=20,
        example="",
    )
    # Optional; omitted birth dates default to None.
    birth_date: Optional[date] = FD(default=None)
class UserRegister(User):
    # Full profile plus password; used only as the sign-up request body.
    password: str = FD(
        ...,
        min_length=8,
        max_length=22,
        example=""
    )
class Tweet(BMW):
    # A single tweet with authorship metadata.
    tweet_id : str = FD(
        ...,
        min_length=5,
        max_length=15,
    )
    content: str = FD(
        ...,
        min_length=1,
        max_length=256,
        example="Hello dear. This is the first tweet from my Api"
    )
    # BUG FIX: ``default=datetime.now()`` was evaluated once at import time,
    # stamping every tweet with the server start time. ``default_factory``
    # re-evaluates per instance.
    created_at: datetime = FD(default_factory=datetime.now)
    updated_at: Optional[datetime] = FD(default=None)
    by: UserBase = FD (...)
# Troll
@app.post(
    path="/F",
    tags=["Troll"]
)
def troll():
    """Easter-egg endpoint returning a fixed joke message."""
    message = "Me está llevando el diablo, send help"
    return message
#Home: Here you can see all tweets
@app.get(
    path="/",
    response_model=List[Tweet],
    status_code=status.HTTP_200_OK,
    summary="Show all the tweets",
    tags=["Home"],
)
def home():
    """
    Show all tweets

    Returns every tweet stored in tweets.json as a list of Tweet models:
    - tweet_id : UUID
    - content : str
    - created_at : datetime
    - updated_at : Optional[datetime]
    - by : UserBase
    """
    with open("tweets.json", "r", encoding="utf-8") as tweets_data:
        return json.loads(tweets_data.read())
#Login
@app.post(
    path="/login/{user_name}",
    response_model=User,
    status_code=status.HTTP_200_OK,
    summary="Login a user",
    tags=["User"],
)
def login(
    user_name: str = Path(..., title="User name"),
    password: str = Form(..., title="Password")
):
    """
    Login

    Authenticates a user by user name (path) and password (form field).

    Returns the matching user record, or raises 404 when the credentials
    match no stored user.
    """
    with open("user.json", "r", encoding="utf-8") as user_data:
        users: list = json.load(user_data)
    # BUG FIX: the original while/for scan looped forever when user.json held
    # an empty list; a single linear scan terminates in every case.
    # NOTE(review): passwords are stored and compared in plain text — confirm
    # whether hashing is planned.
    for user in users:
        if user["user_name"] == user_name and user["password"] == password:
            return user
    raise HTTPException(
        status_code=status.HTTP_404_NOT_FOUND,
        detail="That user don't exist!")
## Path Operations
### Users
#### Register a user
@app.post(
    path="/signup",
    response_model=User,
    status_code=status.HTTP_201_CREATED,
    summary="Register a new User",
    tags=["User"],
)
def signup(user: UserRegister = Body(...)):
    """
    Sign-Up

    Registers a user in the app.

    Parameters:
    - Request body parameter:
        - user: UserRegister

    Returns a JSON with the basic user information:
    - user_id : UUID
    - email : EmailStr
    - first_name : str
    - last_name : str
    - birth_date : datetime
    """
    with open("user.json", "r+", encoding="utf-8") as user_data:
        registered = json.loads(user_data.read())
        new_user = user.dict()
        # UUID and date values are not JSON-serializable; persist as strings.
        new_user["user_id"] = str(new_user["user_id"])
        new_user["birth_date"] = str(new_user["birth_date"])
        registered.append(new_user)
        user_data.seek(0)
        user_data.write(json.dumps(registered))
    return user
#### Show all user
@app.get(
    path="/users",
    response_model=List[User],
    status_code=status.HTTP_200_OK,
    summary="Show all the users",
    tags=["User"],
)
def show_all_users():
    """
    Show all users

    Returns every registered user from user.json as a list of User models:
    - user_id : UUID
    - email : EmailStr
    - first_name : str
    - last_name : str
    - birth_date : datetime
    """
    with open("user.json", "r", encoding="utf-8") as user_data:
        return json.loads(user_data.read())
#### Show a user:
@app.get(
    path="/user/{user_name}",
    response_model=User,
    status_code=status.HTTP_200_OK,
    summary="Show a User",
    tags=["User"],
)
def show_a_user(user_name: str = Path(..., title="User name")):
    """
    Show a user

    Looks up a user by user name.

    Parameters:
    - user_name: path parameter

    Returns a JSON with the basic user information, or raises 404 when the
    user does not exist.
    """
    with open("user.json", "r", encoding="utf-8") as user_data:
        users: list = json.load(user_data)
    # BUG FIX: the original while/for scan never terminated when user.json
    # held an empty list; a plain linear search handles every case.
    for user in users:
        if user["user_name"] == user_name:
            return user
    raise HTTPException(
        status_code=status.HTTP_404_NOT_FOUND,
        detail="That user don't exist!")
#### Delete a user
@app.delete(
    path="/user/{user_name}/delete",
    response_model=str,
    status_code=status.HTTP_200_OK,
    summary="Delete a user",
    tags=["User"],
)
def delete_user(user_name: str = Path(..., title="User name")):
    """
    Delete a user

    Removes the record matching *user_name* from user.json.

    Returns a confirmation message, or raises 404 when no such user exists.
    """
    with open("user.json", "r+", encoding="utf-8") as user_data:
        users: list = json.load(user_data)
    for user in users:
        if user["user_name"] != str(user_name):
            continue
        users.remove(user)
        with open("user.json", "w", encoding="utf-8") as user_data:
            user_data.seek(0)
            json.dump(users, user_data)
        return f"The user {user_name} was be eliminated"
    raise HTTPException(
        status_code=status.HTTP_404_NOT_FOUND,
        detail="This user don't exist!"
    )
#### Update a user
@app.put(
    path="/user/{user_name}/update/",
    response_model=User,
    status_code=status.HTTP_200_OK,
    summary="Update a user",
    tags=["User"],
)
def update_user(
    user_name : str = Path(
        ...,
        title="User name",
        description="Please insert the user that you want update",
    ),
    user: UserRegister = Body(...),
):
    """
    Update a user

    Replaces the stored record of *user_name* with the submitted user data.

    Parameters:
    - user_name: str (path)
    - user: UserRegister (body)

    Returns the updated user record, or raises 404 when no such user exists.
    """
    user_name = str(user_name)
    replacement = user.dict()
    # UUID and date values are not JSON-serializable; persist as strings.
    replacement['user_id'] = str(replacement['user_id'])
    replacement['birth_date'] = str(replacement['birth_date'])
    with open("user.json", "r+", encoding="utf-8") as user_data:
        users = json.loads(user_data.read())
    for position, stored in enumerate(users):
        if stored['user_name'] == user_name:
            users[position] = replacement
            with open("user.json", "w", encoding="utf-8") as user_data:
                user_data.seek(0)
                user_data.write(json.dumps(users))
            return replacement
    raise HTTPException(
        status_code=status.HTTP_404_NOT_FOUND,
        detail="Ese usuario no existe")
### Tweets
#### Post a tweet
@app.post(
    path="/post",
    response_model=Tweet,
    status_code=status.HTTP_201_CREATED,
    summary="Post a tweet",
    tags=["Tweets"]
)
def post_a_tweet(tweet : Tweet = Body(...)):
    """
    Post a Tweet

    Appends a new tweet to tweets.json and echoes it back:
    - tweet_id : UUID
    - content : str
    - created_at : datetime
    - updated_at : Optional[datetime]
    - by : UserBase
    """
    with open("tweets.json", "r+", encoding="utf-8") as tweet_data:
        stored = json.loads(tweet_data.read())
        entry = tweet.dict()
        # Stringify every non-JSON-serializable field before persisting.
        entry["tweet_id"] = str(entry["tweet_id"])
        entry["created_at"] = str(entry["created_at"])
        if entry["updated_at"] is not None:
            entry["updated_at"] = str(entry["updated_at"])
        entry["by"]["user_id"] = str(entry["by"]["user_id"])
        entry["by"]["email"] = str(entry["by"]["email"])
        stored.append(entry)
        tweet_data.seek(0)
        tweet_data.write(json.dumps(stored))
    return tweet
#### Show a tweet
@app.get(
    path="/tweets/{tweet_id}",
    response_model=Tweet,
    status_code=status.HTTP_200_OK,
    summary="Show a tweet",
    tags=["Tweets"],
)
def show_a_tweet(tweet_id: str = Path(..., title="First name")):
    """
    Show a tweet

    Looks up a tweet by its id.

    Parameters:
    - tweet_id: path parameter

    Returns a JSON with the tweet information, or raises 404 when the tweet
    does not exist.
    """
    with open("tweets.json", "r", encoding="utf-8") as tweets_data:
        stored: list = json.load(tweets_data)
    # BUG FIX: the original while/for scan never terminated when tweets.json
    # held an empty list; a plain linear search handles every case.
    for tweet in stored:
        if tweet["tweet_id"] == tweet_id:
            return tweet
    raise HTTPException(
        status_code=status.HTTP_404_NOT_FOUND,
        detail="That tweet don't exist!")
#### Delete a tweet
@app.delete(
    path="/tweets/{tweet_id}/delete",
    response_model=str,
    status_code=status.HTTP_200_OK,
    summary="Delete a tweet",
    tags=["Tweets"],
)
def delete_tweet(tweet_id : str = Path(..., title="Tweet Id")):
    """
    Delete a tweet

    Removes the tweet identified by *tweet_id* from tweets.json.

    Returns a confirmation message, or raises 404 when no such tweet exists.
    """
    with open("tweets.json", "r+", encoding="utf-8") as tweets_data:
        stored: list = json.load(tweets_data)
    for tweet in stored:
        if tweet["tweet_id"] != str(tweet_id):
            continue
        stored.remove(tweet)
        with open("tweets.json", "w", encoding="utf-8") as tweets_data:
            tweets_data.seek(0)
            json.dump(stored, tweets_data)
        return f"The tweet {tweet_id} was be eliminated"
    raise HTTPException(
        status_code=status.HTTP_404_NOT_FOUND,
        detail="This tweet don't exist!"
    )
#### Update a tweet
@app.put(
    path="/tweets/{tweet_id}/update",
    response_model=Tweet,
    status_code=status.HTTP_200_OK,
    summary="Update a tweet",
    tags=["Tweets"],
)
def update_tweet(
    tweet_id : str = Path(
        ...,
        title="Tweet Id"
    ),
    new_content: str = Form(
        ...,
        title="New tweet information")
):
    """
    Update a tweet

    Replaces the content of the tweet identified by *tweet_id*.

    Parameters:
    - tweet_id: str (path)
    - new_content: str (form)

    Returns the updated tweet record, or raises 404 when no such tweet exists.
    """
    with open("tweets.json", "r+", encoding="utf-8") as tweets_data:
        stored: list = json.load(tweets_data)
    for tweet in stored:
        if tweet['tweet_id'] != str(tweet_id):
            continue
        tweet['content'] = new_content
        with open("tweets.json", "w", encoding="utf-8") as tweets_data:
            tweets_data.seek(0)
            tweets_data.write(json.dumps(stored))
        return tweet
    raise HTTPException(
        status_code=status.HTTP_404_NOT_FOUND,
        detail="Ese tweet no existe")
detail="Ese tweet no existe") | davidcordellatt/Twitter-Api-Fastapi-practice- | main.py | main.py | py | 13,686 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "pydantic.Field",
"li... |
16759793113 | import datetime
from ..command_system import Command
from ...models import Client, Schedule, Teacher, Subject
from ...utils import awesome_date
def next_lesson(domain):
    """Build the chat-bot reply describing the client's next lesson.

    Parameters:
    - domain: dict with key 'domain' holding the requester's VK page name.

    Returns a (message, attachment) tuple; the attachment part is always an
    empty string for this command.
    """
    client = Client.query.filter(Client.social_network == 'https://vk.com/'+ domain['domain']).first()
    if client:
        # BUG FIX: the schedule query originally ran before the ``if client``
        # guard, so an unknown page raised AttributeError on ``client.id``
        # instead of producing the friendly "clients only" reply below.
        schedule = Schedule.query.filter((Schedule.client_id == client.id) & (Schedule.time > datetime.datetime.now())).all()
        if schedule:
            # NOTE(review): the query has no order_by, so ``schedule[-1]`` is
            # not guaranteed to be the soonest upcoming lesson — confirm the
            # intended ordering against the Schedule model.
            teacher = Client.query.get_or_404(Teacher.query.get_or_404(schedule[-1].teacher_id).login_id)
            subject = Subject.query.get_or_404(schedule[-1].subject_id)
            message = 'Немного информации о следующем заняии:\n'+'Оно пройдет '+awesome_date(schedule[-1].time)+'\nПреподаватель - '+teacher.first_name+' '+teacher.last_name
        else:
            message = 'Кхм... А следующего занятия нет. Если ты считаешь, что это ошибка, то напиши, пожалуйста, в поддержку.'
    else:
        message = 'Прости, но это доступно только для клиентов. Если ты уже клиент, то пожалуйста зайди на http://edukato.ru/chat-bot и укажи адрес страницы'
    return message, ''
next_lesson_command = Command()
next_lesson_command.keys = ['следующее занятие', 'когда следующее занятие', 'когда следующий урок', 'урок следующий', 'занятие следующее']
next_lesson_command.description = 'Расскажу о следующем занятии *'
next_lesson_command.process = next_lesson | edukato/learning | app/home/commands/next_lesson.py | next_lesson.py | py | 1,707 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "models.Client.query.filter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "models.Client.query",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "models.Client",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "models... |
6519049093 | from tkinter import*
import tkinter as tk
import cv2
import numpy as np
import webbrowser
# OBJECT DETECTION (SSD MobileNet v3, COCO classes)
def obj():
    """Run real-time object detection on the default webcam.

    Loads OpenCV's SSD MobileNet v3 model with COCO class names and draws
    NMS-filtered bounding boxes with labels until 'q' is pressed.
    """
    thres = 0.45          # minimum detection confidence
    nms_threshold = 0.2   # non-maximum-suppression overlap threshold
    cap = cv2.VideoCapture(0)

    className = []
    classFile = 'coco.names'
    with open(classFile, 'rt') as f:
        className = f.read().rstrip('\n').split('\n')
    # print(classNames)

    configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
    weightsPath = 'frozen_inference_graph.pb'
    net = cv2.dnn_DetectionModel(weightsPath, configPath)
    net.setInputSize(320, 320)
    net.setInputScale(1.0 / 127.5)
    net.setInputMean((127.5, 127.5, 127.5))
    net.setInputSwapRB(True)
    while True:
        success, img = cap.read()
        classIds, confs, bbox = net.detect(img, confThreshold=thres)
        bbox = list(bbox)
        confs = list(np.array(confs).reshape(1, -1)[0])
        confs = list(map(float, confs))
        # print(type(confs[0]))
        # print(confs)
        indices = cv2.dnn.NMSBoxes(bbox, confs, thres, nms_threshold)
        for i in indices:
            # NOTE(review): ``i[0]`` assumes NMSBoxes returns nested arrays
            # (older OpenCV); newer versions return flat ints — confirm the
            # installed OpenCV version.
            i = i[0]
            box = bbox[i]
            x, y, w, h = box[0], box[1], box[2], box[3]
            cv2.rectangle(img, (x, y), (x + w, h + y), color=(0, 255, 0), thickness=2)
            cv2.putText(img, className[classIds[i][0] - 1].upper(), (box[0] + 10, box[1] + 30),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow("Display Object Detection Q for EXIT", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
#GRAY ONLY
def GRAY():
    """Show the webcam feed converted to grayscale until 'q' is pressed."""
    capture = cv2.VideoCapture(0)
    while True:
        success, frame = capture.read()
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imshow("Display GRAY Q for EXIT", gray_frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
#HUE ONLY
def HLS():
    """Show the webcam feed converted to the HLS color space until 'q' is pressed."""
    capture = cv2.VideoCapture(0)
    while True:
        success, frame = capture.read()
        hls_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HLS)
        cv2.imshow("Display HLS Q for EXIT", hls_frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
#HSV
def HSV():
    """Show the webcam feed converted to the HSV color space until 'q' is pressed."""
    capture = cv2.VideoCapture(0)
    while True:
        success, frame = capture.read()
        hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        cv2.imshow("Display HSV Q for EXIT", hsv_frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
#VIDEO + REC
def Recandvid():
    """Show an HSV-threshold tuning view and record the raw webcam feed.

    A trackbar window controls an HSV mask; the original frame, its HSV
    conversion, the mask, and the masked result are tiled into one window.
    Every raw frame is also written to ``Recoded_Vid.avi``.

    Bug fixes vs. the original: the VideoWriter was created but never
    written to or released (the AVI stayed empty), its size was hard-coded
    to 740x480 instead of the camera's real resolution (a mismatch makes
    VideoWriter silently drop frames), and the capture device was never
    released.
    """
    def empty(a):
        # Trackbar callback: OpenCV requires one, but there is nothing to do.
        pass

    def stackImages(scale, imgArray):
        """Resize and tile a flat or nested list of images into one canvas."""
        rows = len(imgArray)
        cols = len(imgArray[0])
        rowsAvailable = isinstance(imgArray[0], list)
        width = imgArray[0][0].shape[1]
        height = imgArray[0][0].shape[0]
        if rowsAvailable:
            for x in range(0, rows):
                for y in range(0, cols):
                    if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:
                        imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
                    else:
                        imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]),
                                                    None, scale, scale)
                    # Grayscale images must be 3-channel before stacking.
                    if len(imgArray[x][y].shape) == 2:
                        imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.COLOR_GRAY2BGR)
            imageBlank = np.zeros((height, width, 3), np.uint8)
            hor = [imageBlank] * rows
            for x in range(0, rows):
                hor[x] = np.hstack(imgArray[x])
            ver = np.vstack(hor)
        else:
            for x in range(0, rows):
                if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
                    imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
                else:
                    imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None, scale,
                                             scale)
                if len(imgArray[x].shape) == 2:
                    imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
            ver = np.hstack(imgArray)
        return ver

    path = cv2.VideoCapture(0)
    cv2.namedWindow("TrackBars")
    cv2.resizeWindow("TrackBars", 640, 240)
    cv2.createTrackbar("Hue Min", "TrackBars", 0, 179, empty)
    cv2.createTrackbar("Hue Max", "TrackBars", 19, 179, empty)
    cv2.createTrackbar("Sat Min", "TrackBars", 110, 255, empty)
    cv2.createTrackbar("Sat Max", "TrackBars", 240, 255, empty)
    cv2.createTrackbar("Val Min", "TrackBars", 153, 255, empty)
    cv2.createTrackbar("Val Max", "TrackBars", 255, 255, empty)

    # Size the writer from the actual capture resolution; a size mismatch
    # makes VideoWriter silently produce an unplayable file.
    frame_w = int(path.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_h = int(path.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter("Recoded_Vid.avi", fourcc, 20.0, (frame_w, frame_h))

    while True:
        success, img = path.read()
        if not success:
            break
        out.write(img)  # actually record the raw frame (was missing)
        imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        h_min = cv2.getTrackbarPos("Hue Min", "TrackBars")
        h_max = cv2.getTrackbarPos("Hue Max", "TrackBars")
        s_min = cv2.getTrackbarPos("Sat Min", "TrackBars")
        s_max = cv2.getTrackbarPos("Sat Max", "TrackBars")
        v_min = cv2.getTrackbarPos("Val Min", "TrackBars")
        v_max = cv2.getTrackbarPos("Val Max", "TrackBars")
        print(h_min, h_max, s_min, s_max, v_min, v_max)
        lower = np.array([h_min, s_min, v_min])
        upper = np.array([h_max, s_max, v_max])
        mask = cv2.inRange(imgHSV, lower, upper)
        imgResult = cv2.bitwise_and(img, img, mask=mask)
        imgStack = stackImages(0.6, ([img, imgHSV], [mask, imgResult]))
        cv2.imshow("All Display and Recording.... Q for EXIT", imgStack)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Finalize the AVI and free the camera.
    out.release()
    path.release()
    cv2.destroyAllWindows()
def callback(event):
    """Open the URL shown as the clicked label's text in a new browser page."""
    target_url = event.widget.cget("text")
    webbrowser.open_new(target_url)
# Build the launcher window with one button per camera mode.
# NOTE(review): `Tk`/`Button`/`Label` and `tk.Label` are both used — presumably
# the file has `from tkinter import *` plus `import tkinter as tk`; confirm.
root =Tk()
# NOTE(review): .place() returns None, so button1..button5 are all None;
# the variables are only kept for readability.
button1= Button(root,text="Video and Record",bg='#47d1d1', command=Recandvid).place(x=40, y=70)
button2= Button(root,text="HSV",bg='#47d1d1',command=HSV).place(x=180, y=70)
button3= Button(root,text="HLS",bg='#47d1d1', command=HLS).place(x=230, y=70)
button4= Button(root,text="GRAY",bg='#47d1d1', command=GRAY).place(x=280, y=70)
button5= Button(root,text="Object Detection",bg='#47d1d1', command=obj).place(x=360, y=70)
# Clickable link labels; `callback` opens the label text as a URL.
lbl = tk.Label(root, text=r"https://github.com/jahin44", fg="blue", cursor="hand2")
lbl.pack()
Label(root, text=" Click here to get more software =>", bg="#eee6ff", fg="black").place(x=0, y=0)
lbl.bind("<Button-1>", callback)
lbl1 = tk.Label(root, text=r"https://jahinhasan.blogspot.com", fg="blue", cursor="hand2")
Label(root, text=" Click here to know about me =>", bg="#eee6ff", fg="black").place(x=0, y=20)
lbl1.pack()
lbl1.bind("<Button-1>", callback)
root.geometry("560x300+120+200")
root.mainloop()  # blocks until the window is closed
{
"api_name": "cv2.VideoCapture",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.dnn_DetectionModel",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cv2.dnn.NMSBoxes"... |
23306080113 | import streamlit as st
import plotting
def stats_view(dfmedicine, dfuniques, visualization_mode):
    """Render the general hospital statistics view.

    Shows one metric widget per row of *dfuniques* (label in column 0,
    value in column 1), then a summary of average medicine costs either as
    an interactive bar chart or as a raw dataframe, depending on
    *visualization_mode*.

    Fix vs. the original: the manual `metric_count` index and hard-coded
    `st.columns(3)` raised IndexError with more than three metrics; the
    columns are now sized to the data and paired with `zip`.
    """
    st.markdown("## Conteos generales del hospital respecto a los pacientes ingresados por trauma")

    metric_values = zip(dfuniques.iloc[:, 0].values, dfuniques.iloc[:, 1].values)
    # One column per metric row (at least one, since st.columns(0) is invalid).
    metric_columns = st.columns(max(len(dfuniques), 1))
    for column, (label, value) in zip(metric_columns, metric_values):
        column.metric(label, value)

    barchart = plotting.group_comparisons_bar(dfmedicine, "Medicamento o Insumo", "Precio",
                                              "Comparativa de los precios de los medicamentos", "Costo")
    st.markdown("## Resumen del costo promedio de los medicamentos")
    if visualization_mode == "Gráfica":
        st.markdown("Haga click sobre el gráfico para obtener mayor información acerca del medicamento")
        st.plotly_chart(barchart)
    else:
        st.dataframe(dfmedicine)
{
"api_name": "streamlit.markdown",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "streamlit.columns",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "plotting.group_comparisons_bar",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "str... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 30 20:16:19 2020

@author: Antonio Hickey

Scrapes the most recent daily Treasury yield-curve row and appends it to
"Yeild Curve.csv" as: date, 1mo, 2mo, 3mo, 6mo, 1yr, 2yr, 3yr, 5yr, 7yr,
10yr, 20yr, 30yr.
"""
#-------------------------------------------------------------------
# Importing Modules
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq
import pandas as pd  # NOTE(review): imported but unused — kept for compatibility
import csv
#-------------------------------------------------------------------
# Config
url = 'https://www.treasury.gov/resource-center/data-chart-center/interest-rates/pages/textview.aspx?data=yield'
uClient = uReq(url)
page_soup = soup(uClient.read(), "html.parser")
uClient.close()

# Every yield figure sits in a "text_view_data" cell; the last 13 cells are
# the most recent row: the date followed by the 12 maturities in order.
Elements = page_soup.findAll("td", {"class" : "text_view_data"})
data = tuple(cell.text for cell in Elements[-13:])

# Appending Dataset with New Elements.
# newline='' stops the csv module from emitting blank lines between rows
# on Windows; 'with' guarantees the file is flushed and closed.
with open(r'Yeild Curve.csv', 'a', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(data)
| antonio-hickey/Economics | Yeild Curve/Data Collection/Web_Crawler_Bot.py | Web_Crawler_Bot.py | py | 1,268 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 42,
"usage_type": "call"
}
] |
7927334038 | from PyQt5.QtWidgets import (QWidget, QCalendarWidget,QLabel, QApplication, QVBoxLayout,QPushButton)
from PyQt5.QtCore import QDate
import sys
global ap
class Example(QWidget):
    """A small calendar dialog that saves the clicked date to ``from.txt``."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Build the calendar, the date label and the OK button."""
        vbox = QVBoxLayout(self)

        cal = QCalendarWidget(self)
        cal.setGridVisible(True)
        cal.clicked[QDate].connect(self.showDate)
        vbox.addWidget(cal)

        # Label initially shows the calendar's currently-selected date.
        self.lbl = QLabel(self)
        date = cal.selectedDate()
        self.lbl.setText(date.toString())

        self.ok = QPushButton(self)
        self.ok.setText("OK")
        self.ok.clicked.connect(self.okay)

        vbox.addWidget(self.lbl)
        vbox.addWidget(self.ok)
        self.setLayout(vbox)

        self.setGeometry(300, 300, 350, 300)
        self.setWindowTitle('Calendar')
        self.show()

    def showDate(self, date):
        """Show the clicked date in the label and persist it to ``from.txt``."""
        self.lbl.setText(date.toString())
        selected = date.toString()
        # Context manager guarantees the handle is closed even on error
        # (the original opened/wrote/closed by hand).
        with open("from.txt", mode='w') as file:
            file.write(selected)
        print(selected)

    def okay(self):
        """Close the dialog."""
        self.close()
# Script entry point: show the calendar dialog and exit with Qt's status code.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = Example()  # keep a reference so the widget is not garbage-collected
    sys.exit(app.exec_())
| fernandezjared1/Vodafone-Idea---PS1 | cal.py | cal.py | py | 1,361 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QCalendarWidget",
"line_number": 12,
"usage_type": "call"
},
{
... |
27052806329 | from typing import List
class Solution:
    """LeetCode 412: FizzBuzz."""

    def _fizzBuzz(self, number):
        """Map one integer to its FizzBuzz word (or its decimal string)."""
        words = ""
        if number % 3 == 0:
            words += "Fizz"
        if number % 5 == 0:
            words += "Buzz"
        return words or str(number)

    def fizzBuzz(self, n: int) -> List[str]:
        """Return the FizzBuzz strings for 1..n inclusive."""
        return [self._fizzBuzz(value) for value in range(1, n + 1)]
| ikedaosushi/leetcode | problems/python/fizzBuzz.py | fizzBuzz.py | py | 397 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 15,
"usage_type": "name"
}
] |
27878022936 | # First things, first. Import the wxPython package.
from concurrent.futures import thread
from tracemalloc import start
from turtle import pos
from numpy import size, true_divide
import wx
from wx.adv import *
from Utilities.media_utils import batchDownload, download, spotifyToSearches, ytFromLink
import misc
from spotify_dl import spotify
from spotipy.oauth2 import SpotifyClientCredentials
import threading
from multiprocessing import Process
import ctypes
import time
#Account for high DPI screens on windows
try:
    ctypes.windll.shcore.SetProcessDpiAwareness(True)
except (AttributeError, OSError):
    # Non-Windows platforms have no ctypes.windll (AttributeError); older
    # Windows lacks shcore (OSError). The bare `except:` here previously
    # masked every other error, including KeyboardInterrupt.
    pass
# Next, create an application object.
app = wx.App()
class MainFrame(wx.Frame):
    """Fixed-size main window: the style mask XORs out the resize border
    and the maximize box from the default frame style."""
    def __init__(self, parent, mytitle, mysize):
        wx.Frame.__init__(self, parent, wx.ID_ANY, mytitle, size=mysize,
            style=wx.DEFAULT_FRAME_STYLE^wx.RESIZE_BORDER^wx.MAXIMIZE_BOX)
def getOption():
    """Return the download mode selected by the radio buttons.

    One of "link", "playlist", "album", "track", or -1 if nothing is
    selected.
    """
    choices = (
        (ytl, "link"),
        (spp, "playlist"),
        (spa, "album"),
        (spt, "track"),
    )
    for button, mode in choices:
        if button.Value:
            return mode
    return -1
# Build the main window and all of its controls (module-level globals that
# the handler functions above/below read).
frm = MainFrame(None,"PyraTunes",(680,230))
frm.SetIcon(wx.Icon("misc/FrameIcon.png"))
#Set up panel
pan = wx.Panel(frm)
radiolabel = wx.StaticText(pan,id=1, label="Choose a download option:", pos=(132,75))
f1label = wx.StaticText(pan, label="Enter URL:", pos=(21,15))
f12label = wx.StaticText(pan, label="Enter Target Folder Name:", pos=(351,15))
# Radio buttons queried by getOption().
ytl = wx.RadioButton(pan, label="Youtube Link", style=0, pos=(132,100))
spp = wx.RadioButton(pan, label="Spotify Playlist", style=0, pos=(285,100))
spa = wx.RadioButton(pan, label="Spotify Album", style=0, pos=(132,125))
spt = wx.RadioButton(pan, label="Spotify Track", style=0, pos=(285,125))
# Text inputs read by dlfunc(); empty target falls back to "Downloads".
url_field = wx.TextCtrl(pan, pos=(20,40), size=(320,30), name="Enter URL")
target_field = wx.TextCtrl(pan, pos=(390,40), size=(250,30), name="Enter directory")
target_field.SetHint("Downloads")
dl_button = wx.Button(pan, label="Download", pos=(20,80), size=(102,55))
# Folder-picker button with a scaled bitmap icon.
folder_img = wx.Image("misc/folder.png")
folder_img = folder_img.Scale(26,26, wx.IMAGE_QUALITY_HIGH)
dir_button = wx.BitmapButton(pan, bitmap=folder_img.ConvertToBitmap(), pos=(350,40))
# Spinner shown while a download thread is running; hidden until then.
loading_anim = wx.ActivityIndicator(parent=pan, size=(50 , 50), pos=(550 , 90))
loading_anim.Hide()
#DL Button Background Function
def dlfunc():
    """Background-thread download routine.

    Reads the URL and target folder from the text fields, dispatches to the
    single-link or batch downloader based on the selected radio button, and
    always stops/hides the loading spinner before returning.
    """
    url = url_field.Value
    if url == "":
        loading_anim.Stop()
        loading_anim.Hide()
        return
    folder = target_field.Value or "Downloads"
    mode = getOption()
    if mode == "link":
        download(ytFromLink(url), folder)
    else:
        batchDownload(spotifyToSearches(url, mode), folder)
    loading_anim.Stop()
    loading_anim.Hide()
#Define directory button functionality
def onClickDirectory(event):
    """Open a folder picker and copy the chosen path into the target field."""
    picker = wx.DirDialog(None, "Select folder to download in", "Downloads", wx.DD_DEFAULT_STYLE)
    if picker.ShowModal() == wx.ID_OK:
        target_field.SetValue(picker.GetPath())
    picker.Destroy()
#Define Download Button functionality
def onClickDownload(event):
    """Show the spinner and run the download off the GUI thread."""
    loading_anim.Show()
    loading_anim.Start()
    threading.Thread(target=dlfunc).start()
# Wire the buttons to their handlers.
dl_button.Bind(wx.EVT_BUTTON, onClickDownload)
dir_button.Bind(wx.EVT_BUTTON, onClickDirectory)
# Show frame.
frm.Show()
# Start the GUI event loop (blocks until the window is closed).
app.MainLoop()
{
"api_name": "ctypes.windll.shcore.SetProcessDpiAwareness",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "ctypes.windll",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "wx.App",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": ... |
21600216675 | # -*- coding: utf-8 -*-
"""
ReferIt, UNC, UNC+ and GRef referring image segmentation PyTorch dataset.
Define and group batches of images, segmentations and queries.
Based on:
https://github.com/chenxi116/TF-phrasecut-public/blob/master/build_batches.py
"""
import os
import re
# import cv2
import sys
import json
import torch
import numpy as np
import os.path as osp
import scipy.io as sio
import torch.utils.data as data
sys.path.append('.')
from PIL import Image
from pytorch_pretrained_bert.tokenization import BertTokenizer
from utils.word_utils import Corpus
def read_examples(input_line, unique_id):
    """Wrap one raw query string into a single-element list of `InputExample`s.

    A line of the form "A ||| B" is split into a sentence pair; anything
    else becomes a single-sentence example with text_b = None.
    """
    line = input_line.strip()
    pair = re.match(r"^(.*) \|\|\| (.*)$", line)
    if pair is None:
        text_a, text_b = line, None
    else:
        text_a, text_b = pair.group(1), pair.group(2)
    return [InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b)]
## Bert text encoding
class InputExample(object):
    """A single raw text (or text pair) to be encoded by BERT."""

    def __init__(self, unique_id, text_a, text_b):
        self.unique_id = unique_id  # caller-assigned id for this example
        self.text_a = text_a  # first sentence
        self.text_b = text_b  # optional second sentence (None for single text)
class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
        self.unique_id = unique_id
        self.tokens = tokens  # wordpiece tokens incl. [CLS]/[SEP] (unpadded)
        self.input_ids = input_ids  # token ids, zero-padded to seq_length
        self.input_mask = input_mask  # 1 for real tokens, 0 for padding
        self.input_type_ids = input_type_ids  # segment ids: 0 first sentence, 1 second
def convert_examples_to_features(examples, seq_length, tokenizer):
    """Convert `InputExample`s into padded `InputFeatures`.

    Each example is tokenized, wrapped with [CLS]/[SEP] markers, converted
    to ids, and zero-padded to exactly `seq_length` tokens. The manual
    one-token-at-a-time append loops of the original are replaced with
    list operations; behavior is unchanged.
    """
    features = []
    for example in examples:
        tokens_a = tokenizer.tokenize(example.text_a)

        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Account for [CLS], [SEP], [SEP] with "- 3".
            # NOTE(review): _truncate_seq_pair is not defined in this module;
            # this branch raises NameError if a text pair is ever passed.
            _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
        elif len(tokens_a) > seq_length - 2:
            # Account for [CLS] and [SEP] with "- 2".
            tokens_a = tokens_a[0:(seq_length - 2)]

        # Segment ids: 0 for the first sentence (incl. [CLS] and its [SEP]),
        # 1 for the optional second sentence and its trailing [SEP].
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        input_type_ids = [0] * len(tokens)
        if tokens_b:
            tokens = tokens + tokens_b + ["[SEP]"]
            input_type_ids = input_type_ids + [1] * (len(tokens_b) + 1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding = seq_length - len(input_ids)
        input_ids = input_ids + [0] * padding
        input_mask = input_mask + [0] * padding
        input_type_ids = input_type_ids + [0] * padding

        assert len(input_ids) == seq_length
        assert len(input_mask) == seq_length
        assert len(input_type_ids) == seq_length

        features.append(
            InputFeatures(
                unique_id=example.unique_id,
                tokens=tokens,
                input_ids=input_ids,
                input_mask=input_mask,
                input_type_ids=input_type_ids))
    return features
class DatasetNotFoundError(Exception):
    """Raised when the pre-processed index cache for a dataset is missing."""
    pass
class TransVGDataset(data.Dataset):
    """Referring-expression grounding dataset (image, query phrase, target box).

    Loads pre-built index caches (``<dataset>_<split>.pth``) from
    ``split_root`` and serves transformed image / encoded-phrase / bbox
    samples. Phrases are encoded either through an LSTM corpus or a BERT
    tokenizer, selected by the ``lstm`` flag.
    """
    # Per-dataset valid splits (and, where applicable, the refer-API params
    # that were used when the index caches were built).
    SUPPORTED_DATASETS = {
        'referit': {'splits': ('train', 'val', 'trainval', 'test')},
        'unc': {
            'splits': ('train', 'val', 'trainval', 'testA', 'testB'),
            'params': {'dataset': 'refcoco', 'split_by': 'unc'}
        },
        'unc+': {
            'splits': ('train', 'val', 'trainval', 'testA', 'testB'),
            'params': {'dataset': 'refcoco+', 'split_by': 'unc'}
        },
        'gref': {
            'splits': ('train', 'val'),
            'params': {'dataset': 'refcocog', 'split_by': 'google'}
        },
        'gref_umd': {
            'splits': ('train', 'val', 'test'),
            'params': {'dataset': 'refcocog', 'split_by': 'umd'}
        },
        'flickr': {
            'splits': ('train', 'val', 'test')}
    }

    def __init__(self, data_root, split_root='data', dataset='referit',
                 transform=None, return_idx=False, testmode=False,
                 split='train', max_query_len=128, lstm=False,
                 bert_model='bert-base-uncased'):
        """Resolve image/split directories, then load the cached index lists.

        Exits the process with a download hint if the index cache is absent.
        """
        self.images = []
        self.data_root = data_root
        self.split_root = split_root
        self.dataset = dataset
        self.query_len = max_query_len
        self.lstm = lstm
        self.transform = transform
        self.testmode = testmode
        self.split = split
        self.tokenizer = BertTokenizer.from_pretrained(bert_model, do_lower_case=True)
        self.return_idx=return_idx

        assert self.transform is not None

        # Augmentation is only enabled for the training split.
        if split == 'train':
            self.augment = True
        else:
            self.augment = False

        # Dataset-specific directory layout.
        if self.dataset == 'referit':
            self.dataset_root = osp.join(self.data_root, 'referit')
            self.im_dir = osp.join(self.dataset_root, 'images')
            self.split_dir = osp.join(self.dataset_root, 'splits')
        elif  self.dataset == 'flickr':
            self.dataset_root = osp.join(self.data_root, 'Flickr30k')
            self.im_dir = osp.join(self.dataset_root, 'flickr30k_images')
        else:   ## refcoco, etc.
            self.dataset_root = osp.join(self.data_root, 'other')
            self.im_dir = osp.join(
                self.dataset_root, 'images', 'mscoco', 'images', 'train2014')
            self.split_dir = osp.join(self.dataset_root, 'splits')

        if not self.exists_dataset():
            # self.process_dataset()
            print('Please download index cache to data folder: \n \
                https://drive.google.com/open?id=1cZI562MABLtAzM6YU4WmKPFFguuVr0lZ')
            exit(0)

        dataset_path = osp.join(self.split_root, self.dataset)
        valid_splits = self.SUPPORTED_DATASETS[self.dataset]['splits']

        # LSTM encoding needs the serialized vocabulary corpus.
        if self.lstm:
            self.corpus = Corpus()
            corpus_path = osp.join(dataset_path, 'corpus.pth')
            self.corpus = torch.load(corpus_path)

        if split not in valid_splits:
            raise ValueError(
                'Dataset {0} does not have split {1}'.format(
                    self.dataset, split))

        # 'trainval' is stored as two separate index files for non-referit sets.
        splits = [split]
        if self.dataset != 'referit':
            splits = ['train', 'val'] if split == 'trainval' else [split]
        for split in splits:
            imgset_file = '{0}_{1}.pth'.format(self.dataset, split)
            imgset_path = osp.join(dataset_path, imgset_file)
            self.images += torch.load(imgset_path)

    def exists_dataset(self):
        """Return True if the index cache directory for this dataset exists."""
        return osp.exists(osp.join(self.split_root, self.dataset))

    def pull_item(self, idx):
        """Return (PIL image, phrase, float x1y1x2y2 bbox tensor) for `idx`."""
        if self.dataset == 'flickr':
            img_file, bbox, phrase = self.images[idx]
        else:
            # NOTE(review): the trailing `attri` field is unpacked but unused.
            img_file, _, bbox, phrase, attri = self.images[idx]
        ## box format: to x1y1x2y2
        if not (self.dataset == 'referit' or self.dataset == 'flickr'):
            # refcoco-style boxes are stored as x,y,w,h — convert to corners.
            bbox = np.array(bbox, dtype=int)
            bbox[2], bbox[3] = bbox[0]+bbox[2], bbox[1]+bbox[3]
        else:
            bbox = np.array(bbox, dtype=int)

        img_path = osp.join(self.im_dir, img_file)
        img = Image.open(img_path).convert("RGB")

        bbox = torch.tensor(bbox)
        bbox = bbox.float()
        return img, phrase, bbox

    def tokenize_phrase(self, phrase):
        """Encode a phrase to fixed-length word ids via the LSTM corpus."""
        return self.corpus.tokenize(phrase, self.query_len)

    def untokenize_word_vector(self, words):
        """Map word ids back to vocabulary entries."""
        return self.corpus.dictionary[words]

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        """Return the transformed sample: image, mask, word ids/mask, bbox."""
        img, phrase, bbox = self.pull_item(idx)
        phrase = phrase.lower()
        input_dict = {'img': img, 'box': bbox, 'text': phrase}
        input_dict = self.transform(input_dict)
        img = input_dict['img']
        bbox = input_dict['box']
        phrase = input_dict['text']
        img_mask = input_dict['mask']

        if self.lstm:
            phrase = self.tokenize_phrase(phrase)
            word_id = phrase
            word_mask = np.array(word_id>0, dtype=int)
        else:
            ## encode phrase to bert input
            examples = read_examples(phrase, idx)
            features = convert_examples_to_features(
                examples=examples, seq_length=self.query_len, tokenizer=self.tokenizer)
            word_id = features[0].input_ids
            word_mask = features[0].input_mask

        if self.testmode:
            # NOTE(review): `ratio`, `dw` and `dh` are undefined in this scope,
            # so this branch raises NameError when testmode is True — they
            # presumably came from a removed letterbox/resize step; confirm.
            return img, np.array(word_id, dtype=int), np.array(word_mask, dtype=int), \
                np.array(bbox, dtype=np.float32), np.array(ratio, dtype=np.float32), \
                np.array(dw, dtype=np.float32), np.array(dh, dtype=np.float32), self.images[idx][0]
        else:
            return img, np.array(img_mask), np.array(word_id, dtype=int), np.array(word_mask, dtype=int), np.array(bbox, dtype=np.float32)
{
"api_name": "sys.path.append",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "re.match",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
... |
42843762688 | import pytest
import time
import json
import logging
from error_code.error_status import SignatureStatus
from automation_framework.utilities.workflow import submit_request
from automation_framework.work_order_get_result.work_order_get_result_params \
import WorkOrderGetResult
import avalon_client_sdk.worker.worker_details as worker
from automation_framework.utilities.request_args import GetResultWaitTime
logger = logging.getLogger(__name__)
def submit_work_order_get_result(input_request, request_mode, tamper,
                                 output_json_file_name, uri_client,
                                 err_cd, work_order_id,
                                 request_id):
    """ Function to submit work order get result request.
        Uses WorkOrderGetResult class to initialize request object,
        then polls until a "result" arrives, a non-pending error is
        returned, or the timeout budget elapses.
        Return err_cd, response, submiting_time (as a tuple).

        Fixes vs. the original: the "object" and non-"object" request_mode
        branches were byte-identical duplicates (collapsed into one path),
        and `response` was referenced in the return tuple without being
        defined when err_cd != 0 (NameError).
    """
    logger.info("------ Testing WorkOrderGetResult ------")

    submiting_time = ""
    response = {}
    if err_cd == 0:
        logger.info("----- Constructing WorkOrderGetResult -----")
        request_obj = WorkOrderGetResult()
        request_obj.set_work_order_id(work_order_id)
        request_obj.set_request_id(request_id)
        input_get_result = json.loads(request_obj.to_string())

        logger.info("----- Validating WorkOrderGetResult Response ------")
        response_timeout_start = time.time()
        response_timeout_multiplier = ((6000 / 3600) + 6) * 3  # ~23 s budget
        while "result" not in response:
            if "error" in response:
                # Error code 5 means "result pending": keep polling on it,
                # bail out on any other error code.
                if response["error"]["code"] != 5:
                    logger.info('WorkOrderGetResult - '
                                'Response received with error code. ')
                    err_cd = 1
                    break
            if (time.time() - response_timeout_start) > \
                    response_timeout_multiplier:
                logger.info('ERROR: WorkOrderGetResult response is not \
                       received within expected time.')
                break

            # submit work order get result request and retrieve response
            response = submit_request(uri_client, input_get_result,
                                      output_json_file_name)
            time.sleep(GetResultWaitTime.LOOP_WAIT_TIME.value)
    else:
        logger.info('ERROR: WorkOrderGetResult not performed - \
                    Expected response not received for \
                    WorkOrderSubmit.')

    return (err_cd, response, submiting_time)
| manojsalunke85/avalon0.6_automaiton | tests/validation_suite/automation_framework/work_order_get_result/work_order_get_result_utility.py | work_order_get_result_utility.py | py | 2,938 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "automation_framework.work_order_get_result.work_order_get_result_params.WorkOrderGetResult",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 35,
... |
15827613862 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import pymongo
import emission.storage.timeseries.timequery as estt
import emission.core.wrapper.entry as ecwe
import emission.storage.timeseries.abstract_timeseries as esta
# Keys for raw (segmentation-stage) objects.
RAW_TRIP_KEY = "segmentation/raw_trip"
RAW_PLACE_KEY = "segmentation/raw_place"
RAW_SECTION_KEY = "segmentation/raw_section"
RAW_STOP_KEY = "segmentation/raw_stop"
RAW_UNTRACKED_KEY = "segmentation/raw_untracked"

# Keys for cleaned / inferred / confirmed analysis objects.
CLEANED_TRIP_KEY = "analysis/cleaned_trip"
CLEANED_PLACE_KEY = "analysis/cleaned_place"
CLEANED_SECTION_KEY = "analysis/cleaned_section"
INFERRED_SECTION_KEY = "analysis/inferred_section"
CLEANED_STOP_KEY = "analysis/cleaned_stop"
CLEANED_UNTRACKED_KEY = "analysis/cleaned_untracked"
CLEANED_LOCATION_KEY = "analysis/recreated_location"
INFERRED_TRIP_KEY = "analysis/inferred_trip"
EXPECTED_TRIP_KEY = "analysis/expected_trip"
CONFIRMED_TRIP_KEY = "analysis/confirmed_trip"
CONFIRMED_PLACE_KEY = "analysis/confirmed_place"
CONFIRMED_UNTRACKED_KEY = "analysis/confirmed_untracked"
COMPOSITE_TRIP_KEY = "analysis/composite_trip"

# Keys for daily aggregate metrics.
METRICS_DAILY_USER_COUNT = "metrics/daily_user_count"
METRICS_DAILY_MEAN_COUNT = "metrics/daily_mean_count"
METRICS_DAILY_USER_DISTANCE = "metrics/daily_user_distance"
METRICS_DAILY_MEAN_DISTANCE = "metrics/daily_mean_distance"
METRICS_DAILY_USER_DURATION = "metrics/daily_user_duration"
METRICS_DAILY_MEAN_DURATION = "metrics/daily_mean_duration"
METRICS_DAILY_USER_MEDIAN_SPEED = "metrics/daily_user_median_speed"
METRICS_DAILY_MEAN_MEDIAN_SPEED = "metrics/daily_mean_median_speed"

# Keys for label inference and the trained trip model.
INFERRED_LABELS_KEY = "inference/labels"
TRIP_MODEL_STORE_KEY = "inference/trip_model"
# General methods
def get_object(key, object_id):
    """Return the `data` payload of the entry with `object_id` under `key`."""
    return get_entry(key, object_id).data
def get_entry(key, object_id):
    """Fetch a single wrapped entry by id from the aggregate time series."""
    return esta.TimeSeries.get_aggregate_time_series().get_entry_from_id(
        key, object_id)
def get_objects(key, user_id, time_query, geo_query=None):
    """Return the `data` payloads of all entries matching the queries."""
    matching = get_entries(key, user_id=user_id, time_query=time_query,
                           geo_query=geo_query)
    return [e.data for e in matching]
def get_entries(key, user_id, time_query, untracked_key = None,
                geo_query=None,
                extra_query_list=None):
    """Fetch wrapped Entry objects for `key` (and `untracked_key`, if given).

    Queries the per-user series, or the aggregate series when user_id is
    None.
    """
    ts = get_timeseries_for_user(user_id)
    if untracked_key is None:
        key_list = [key]
    else:
        key_list = [key, untracked_key]
        logging.debug("after appending untracked_key %s, key_list is %s" %
            (untracked_key, key_list))
    doc_cursor = ts.find_entries(key_list, time_query, geo_query, extra_query_list)
    # TODO: Fix "TripIterator" and return it instead of this list
    wrapped_entries = [ecwe.Entry(doc) for doc in doc_cursor]
    logging.debug("Returning entry with length %d result" % len(wrapped_entries))
    return wrapped_entries
def get_data_df(key, user_id, time_query, geo_query=None,
                extra_query_list=None):
    """Return the matching entries for `key` as a dataframe."""
    user_ts = get_timeseries_for_user(user_id)
    result_df = user_ts.get_data_df(key, time_query, geo_query, extra_query_list)
    logging.debug("Returning entry with length %d result" % len(result_df))
    return result_df
def get_timeseries_for_user(user_id):
    """Return the per-user time series, or the aggregate one for user_id None."""
    if user_id is None:
        ts = esta.TimeSeries.get_aggregate_time_series()
    else:
        ts = esta.TimeSeries.get_time_series(user_id)
    logging.debug("for user %s, returning timeseries %s" % (user_id, ts))
    return ts
# Object-specific associations
def get_time_query_for_trip_like(key, trip_like_id):
    """
    Returns the query that returns all the points associated with this
    trip-like (examples of trip-like objects are: raw trip, cleaned trip,
    raw section), looked up by its id under `key`.
    """
    trip = get_object(key, trip_like_id)
    return get_time_query_for_trip_like_object(trip)
def get_time_query_for_trip_like_object(trip_like):
    """
    Returns the query that returns all the points associated with this
    trip-like (raw trip, cleaned trip, raw section): a time-range query on
    `data.ts` between the object's start_ts and end_ts.
    """
    return estt.TimeQuery("data.ts", trip_like.start_ts, trip_like.end_ts)
| e-mission/e-mission-server | emission/storage/decorations/analysis_timeseries_queries.py | analysis_timeseries_queries.py | py | 4,240 | python | en | code | 22 | github-code | 36 | [
{
"api_name": "future.standard_library.install_aliases",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "future.standard_library",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "emission.storage.timeseries.abstract_timeseries.TimeSeries.get_aggregate_time_serie... |
13460739357 | import torch, torchvision
import os
class CNN(torch.nn.Module):
    """Image classifier wrapping a torchvision backbone with a resized head.

    Supported ``network_type`` values: 'images_resnet18',
    'images_mobilenetv2', 'images_shufflenetv2'. Supported
    ``dataset_name`` values: 'dogscats' (2 classes), 'imagenet' (6 classes).

    Raises:
        ValueError: on an unknown dataset or network type (the original
            silently produced a NameError / an object with no ``model``).
    """

    def __init__(self, network_type, dataset_name):
        super(CNN, self).__init__()
        self.network_type = network_type

        # Resolve the class count first so an unknown dataset fails loudly
        # instead of raising NameError further down.
        if dataset_name == 'dogscats':
            classes = 2
        elif dataset_name == 'imagenet':
            classes = 6
        else:
            raise ValueError("Unknown dataset_name: {!r}".format(dataset_name))

        if self.network_type == 'images_resnet18':
            self.model = torchvision.models.resnet18()
            self.model.fc = torch.nn.Linear(512, classes)
        elif self.network_type == 'images_mobilenetv2':
            self.model = torchvision.models.mobilenet_v2()
            self.model.classifier = torch.nn.Linear(1280, classes)
        elif self.network_type == 'images_shufflenetv2':
            # NOTE(review): only this backbone loads pretrained weights —
            # confirm whether the asymmetry with the others is intentional.
            self.model = torchvision.models.shufflenet_v2_x0_5(pretrained=True)
            self.model.fc = torch.nn.Linear(1024, classes)
        else:
            raise ValueError("Unknown network_type: {!r}".format(network_type))

    def forward(self, x):
        """Run the wrapped backbone on the input batch `x`."""
        return self.model(x)
{
"api_name": "torch.nn",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "torchvision.models.resnet18",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torc... |
5450602551 | from enum import Enum
import numpy as np
import pandas as pd
from dataclasses import dataclass
from copy import deepcopy
from typing import List, Union
class Direction(Enum):
    """Head movement directions, valued by their puzzle-input letters."""
    UP = 'U'
    DOWN = 'D'
    LEFT = 'L'
    RIGHT = 'R'
@dataclass
class Motion:
    """One puzzle-input move: a direction plus a number of unit steps."""
    # May be constructed with a raw letter ('U'/'D'/'L'/'R'); it is coerced
    # to a Direction member in __post_init__.
    direction: Union[Direction, str]
    steps: int

    def __post_init__(self):
        if isinstance(self.direction, str):
            self.direction = Direction(self.direction)
@dataclass
class Position:
    """A mutable 2-D grid coordinate; defaults to the origin."""
    x: int = 0
    y: int = 0
class Iteration:
    """Snapshot of all knot positions after one simulation step."""

    def __init__(self, knots_number: int):
        self.knots_number = knots_number
        # Every knot starts at the origin.
        self.knots: List[Position] = [Position() for _ in range(knots_number)]

    @property
    def H(self) -> Position:
        """Head knot (first in the chain)."""
        return self.knots[0]

    @property
    def T(self) -> Position:
        """Tail knot (last in the chain)."""
        return self.knots[-1]
class Simulation:
    """Drives a rope of `knots_number` knots through motions, keeping a
    full snapshot of the knot chain after every single step."""

    def __init__(self, knots_number: int):
        self.knots_number = knots_number
        self.iterations: List[Iteration] = [Iteration(knots_number)]

    @property
    def last_iteration(self) -> Iteration:
        """Most recent snapshot of the knot chain."""
        return self.iterations[-1]

    def execute_motion(self, motion: Motion) -> None:
        """Apply one motion step by step, appending a snapshot per step."""
        for _ in range(motion.steps):
            snapshot = deepcopy(self.last_iteration)
            Moves.head_move(snapshot.knots[0], motion.direction)
            # Each follower knot chases the knot directly ahead of it.
            for follower in range(1, self.knots_number):
                Moves.tail_move(snapshot.knots[follower - 1], snapshot.knots[follower])
            self.iterations.append(snapshot)

    def execute_motions(self, motions: List[Motion]) -> None:
        """Apply each motion in order."""
        for single_motion in motions:
            self.execute_motion(single_motion)

    @property
    def tail_positions(self) -> pd.DataFrame:
        """All recorded (x, y) positions of the last knot, one row per step."""
        frame = pd.DataFrame([(it.T.x, it.T.y) for it in self.iterations])
        frame.columns = ('x', 'y')
        return frame
class Moves:
    """Pure movement rules for the rope simulation (mutate positions in place)."""

    @staticmethod
    def head_move(H: Position, direction: Direction) -> None:
        """Move the head knot one unit step in `direction`."""
        if direction == Direction.UP:
            H.y += 1
        elif direction == Direction.DOWN:
            H.y -= 1
        elif direction == Direction.LEFT:
            H.x -= 1
        elif direction == Direction.RIGHT:
            H.x += 1

    @staticmethod
    def tail_move(H: Position, T: Position) -> None:
        """Pull the tail one step toward the head if they are not touching.

        The original nine-branch table collapses to a single rule: when
        either axis gap exceeds 1, the tail moves one step toward the head
        on every axis that differs (diagonal catch-up included). This is
        branch-for-branch equivalent to the original.
        """
        x_diff = T.x - H.x
        y_diff = T.y - H.y
        if abs(x_diff) > 1 or abs(y_diff) > 1:
            # (d > 0) - (d < 0) is sign(d) without importing math.
            T.x -= (x_diff > 0) - (x_diff < 0)
            T.y -= (y_diff > 0) - (y_diff < 0)
def read_input_row(row: str) -> Motion:
    """Parse one puzzle line like "R 4" into a Motion."""
    direction, steps = row.split(' ')
    return Motion(direction, int(steps))
def main() -> None:
    """Solve Advent of Code 2022 day 9: count distinct tail positions."""
    with open('input.txt', 'r') as f:
        input_data = f.read().strip().split('\n')

    motions: List[Motion] = [read_input_row(row) for row in input_data]

    def part_one() -> int:
        # Two knots: head plus a single tail.
        simulation = Simulation(knots_number=2)
        simulation.execute_motions(motions)
        return len(simulation.tail_positions.drop_duplicates())

    def part_two() -> int:
        # Ten knots: head plus nine followers.
        simulation = Simulation(knots_number=10)
        simulation.execute_motions(motions)
        return len(simulation.tail_positions.drop_duplicates())

    print(f'Part one: {part_one()}')
    print(f'Part two: {part_two()}')


if __name__ == '__main__':
    main()
| wbonna352/adventofcode2022 | day_09/main.py | main.py | py | 3,827 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
... |
41636716649 | import argparse
def create_parser():
    """Build the argument parser for the HR inventory CLI.

    Positional ``path`` names the file to read or write; the ``--export``
    flag switches from importing/matching users to exporting settings.

    :return: configured argparse.ArgumentParser
    """
    # fixed user-facing typo: 'invetory' -> 'inventory'
    parser = argparse.ArgumentParser(description='HR inventory software')
    parser.add_argument('path', help='Path to file to be exported')
    parser.add_argument('--export', action='store_true', help='Export current settings to json file')
    return parser
def main():
    """CLI entry point: export settings or read a user file and match users."""
    from hr import users, inventory

    args = create_parser().parse_args()
    if not args.export:
        # default mode: load the user list from the file and match it
        users.match_users(inventory.read_file(args.path))
    else:
        inventory.export_users(args.path)
| hamakohako/hr_inventory_test | src/hr/cli.py | cli.py | py | 510 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "hr.inventory.export_users",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "hr.inventory",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "hr.inve... |
9588027057 | """
A Jarvis plugin for listening music according
to your mood through Spotify's Web Player!
Jarvis asks for your mood and based on your choice it
opens a specific playlist of Spotify that fits
your mood.
"""
import webbrowser
from plugin import plugin
from plugin import require
from colorama import Fore
@require(network=True)
@plugin("mood music")
def open_spotify(jarvis, s):
    """Ask the user for a mood (1-12) and open a matching Spotify playlist.

    Keeps prompting for new moods until the user answers "no" at the
    continue prompt. Requires network access (opens the web player).
    """
    jarvis.say("\nHello! What's your mood for today? \n")
    # list that stores the available mood choices
    list_of_moods = ["1.Feel Good Morning",
                     "2.So excited just can't hide it!",
                     "3.Party time",
                     "4.Workout beats",
                     "5.Me, myself and I",
                     "6.Chilled out",
                     "7.Roadtrip",
                     "8.Sunset to Sunrise",
                     "9.Jazz Chills",
                     "10.Back to 90s",
                     "11.Stress Relief",
                     "12.Not my Day,my Week,my Month or even my Year",
                     ]
    # print the moods colour-coded in groups of three
    for i in range(0, 3):
        jarvis.say(list_of_moods[i], Fore.LIGHTCYAN_EX)
    for i in range(3, 6):
        jarvis.say(list_of_moods[i], Fore.CYAN)
    for i in range(6, 9):
        jarvis.say(list_of_moods[i], Fore.LIGHTMAGENTA_EX)
    for i in range(9, 12):
        jarvis.say(list_of_moods[i], Fore.MAGENTA)
    print()
    stop = False
    while not stop:
        # validation flag for the mood prompt; input must parse as an
        # integer in the range 1-12
        invalid_mood = True
        while invalid_mood:
            mood = jarvis.input("Choose your mood (1-12): ")
            print()
            try:
                int_mood = int(mood)
                if int_mood < 1 or int_mood > 12:
                    print("Sorry invalid input was given! "
                          "Please enter a valid one!(1-12)")
                else:
                    invalid_mood = False
            except ValueError:
                invalid_mood = True
                print("Sorry invalid input was given! "
                      "Please enter a valid one!(1-12)")
        # base url of a Spotify playlist
        url = "https://open.spotify.com/playlist/"
        # per-mood playlist urls: base url plus each playlist's unique id
        url_dict = {"1": url + "3IBrsav3Sh8AImtaGoaP07",
                    "2": url + "37i9dQZF1DWSf2RDTDayIx",
                    "3": url + "4MKC0zUOwvz5gGfKX93LV1",
                    "4": url + "190wZ2oVo7MTrBvNlPiub2",
                    "5": url + "37i9dQZF1DWZLcGGC0HJbc",
                    "6": url + "37i9dQZF1DX889U0CL85jj",
                    "7": url + "0l0a4uSRYz0VWnt38VAEzR",
                    "8": url + "0Au3b2NB5uz8Iwwx5sl6K5",
                    "9": url + "37i9dQZF1DX0SM0LYsmbMT",
                    "10": url + "37i9dQZF1DXbTxeAdrVG2l",
                    "11": url + "37i9dQZF1DWXe9gFZP0gtP",
                    "12": url + "3c0Nv5CY6TIaRszlTZbUFk",
                    }
        # BUGFIX: look up by the canonical digits rather than the raw input
        # string — int() accepts forms like " 1" or "01" that would otherwise
        # raise KeyError here after passing validation
        webbrowser.open(url_dict[str(int_mood)])
        jarvis.say("Changed your mood?")
        answer = jarvis.input("Type anything to continue or No to exit: ")
        print()
        if answer.upper() == "NO":
            stop = True
| sukeesh/Jarvis | jarviscli/plugins/mood_music.py | mood_music.py | py | 3,509 | python | en | code | 2,765 | github-code | 36 | [
{
"api_name": "colorama.Fore.LIGHTCYAN_EX",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.CYAN",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": ... |
1576881503 | from setuptools import setup, find_packages
# Read the long description from the readme so PyPI renders it verbatim.
with open('readme.md', encoding='utf-8') as f:
    long_description = f.read()

setup(
    packages = find_packages(),
    name = 'pbat',
    version = '0.0.16',
    author = "Stanislav Doronin",
    author_email = "mugisbrows@gmail.com",
    url = 'https://github.com/mugiseyebrows/pbat',
    description = 'Batch file preprocessor',
    long_description = long_description,
    long_description_content_type = 'text/markdown',
    install_requires = ['lark'],
    # ship the example scripts and the lark grammar file with the package
    package_data = {
        'pbat': ['examples/*.pbat', 'examples/*.bat', '*.lark']
    },
    # `pbat` console command maps to pbat.compile:main
    entry_points = {
        'console_scripts': [
            'pbat = pbat.compile:main'
        ]
    }
) | mugiseyebrows/pbat | setup.py | setup.py | py | 705 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 7,
"usage_type": "call"
}
] |
39924563926 | from django.db import models
from django.utils import timezone
from . import Season
class SeasonPlayerManager(models.Manager):
    """Manager with helpers for maintaining and querying SeasonPlayer rows."""

    def update_active(self, player, elo, wins, losses):
        """Update or create the season player instance for the active season."""
        active_season = Season.objects.get_active()
        # NOTE(review): 'player' and 'season' appear both as lookup kwargs and
        # inside defaults — redundant but harmless with update_or_create.
        updated_values = {
            'player': player,
            'season': active_season,
            'elo_score': elo,
            'win_count': wins,
            'loss_count': losses
        }
        SeasonPlayer.objects.update_or_create(player=player, season=active_season, defaults=updated_values)

    def get_winner(self, season):
        """Returns the player with the most elo points for the given season."""
        return (
            self.get_queryset()
            .filter(season=season)
            .order_by('-elo_score')
            .first()
        )
class SeasonPlayer(models.Model):
    """Denormalized representation of a player for a particular season."""

    # FK relationships to other models
    player = models.ForeignKey('core.Player')
    season = models.ForeignKey('core.Season')

    # denormalized counts and elo score
    elo_score = models.IntegerField(blank=True)
    win_count = models.IntegerField(blank=True)
    loss_count = models.IntegerField(blank=True)

    objects = SeasonPlayerManager()

    def __unicode__(self):
        return "{player} during {season} season".format(
            player=self.player,
            season=self.season
        )

    def undo_win(self, elo_points, commit=True):
        """Roll back a recorded win: remove the elo gained and decrement wins."""
        self.elo_score -= elo_points
        self.win_count -= 1
        if commit:
            self.save()

    def undo_loss(self, elo_points, commit=True):
        """Roll back a recorded loss: restore the elo lost and decrement losses."""
        self.elo_score += elo_points
        self.loss_count += -1 if False else -1  # see note below
        if commit:
            self.save()
| dannymilsom/poolbot-server | src/core/models/season_player.py | season_player.py | py | 1,847 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "django.db.models.Manager",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name... |
4635120984 | #coding=utf-8
from confluent_kafka import Producer
import MySQLdb
import json
import time
import random
# Kafka producer pointed at the broker this script publishes to.
p = Producer({"bootstrap.servers": "118.24.53.99:9092"})
# NOTE(review): database host/user/password are hard-coded in source —
# consider moving them to configuration or environment variables.
db = MySQLdb.connect("localhost", "root", "123456", "test_kafka", charset='utf8' )
cursor = db.cursor()
# Every stored message body; each row is re-serialized and produced below.
sql = "SELECT msg_body FROM order_kafka_msg;"
def delivery_report(err, msg):
    """Per-message delivery callback for Producer.produce().

    Triggered by poll()/flush(); prints the delivery outcome.
    """
    if err is None:
        print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
    else:
        print('Message delivery failed: {}'.format(err))
data_list=[]  # NOTE(review): unused in this script
# while (True):
cursor.execute(sql.encode("utf-8"))
results = cursor.fetchall()
for result in results:
    print("------------------")
    # normalize the stored JSON (parse then re-dump, keeping non-ASCII intact)
    setJson = json.dumps(json.loads(result[0]),ensure_ascii=False)
    print(setJson.encode("utf-8"))
    # produce each row to the "test" topic; flush per message so the
    # delivery callback fires immediately
    p.produce("test",setJson.encode("utf-8"),callback=delivery_report)
    p.flush()
    # time.sleep(60)
| liu-xiaoran/demo | java/kafka/pd2.py | pd2.py | py | 1,018 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "confluent_kafka.Producer",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "MySQLdb.connect",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "json.loads",
"... |
5318351393 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy
from scipy.misc import derivative
def fn_plot1d(fn, x_min, x_max, filename):
    """Plot fn over [x_min, x_max] (1000 samples) and save the figure."""
    xs = np.linspace(x_min, x_max, 1000)
    ys = np.vectorize(fn)(xs)
    y_label = fn.__name__ + "(x)"
    plt.plot(xs, ys)
    plt.xlabel("X axis")
    plt.ylabel(y_label)
    plt.title("1D plot of " + y_label)
    plt.savefig(filename)
    plt.clf()
def fn_plot2d(fn, x_min, x_max, y_min, y_max, filename):
    """Render a 3D surface plot of fn(x, y) over the given ranges and save it.

    Samples a 1000x1000 grid, evaluates fn elementwise, and writes the
    surface plot to `filename`.
    """
    num_x = 1000
    num_y = 1000
    x = np.linspace(x_min, x_max, num_x)
    y = np.linspace(y_min, y_max, num_y)
    X, Y = np.meshgrid(x, y)
    fnv = np.vectorize(fn)
    Z = fnv(X, Y)
    fig = plt.figure()
    # FIX: fig.gca(projection='3d') was deprecated in Matplotlib 3.4 and
    # removed in 3.6; add_subplot is the supported way to get a 3D axes.
    ax = fig.add_subplot(projection='3d')
    ax.plot_surface(X, Y, Z)
    Zlabel = fn.__name__ + "(x,y)"
    ax.set_xlabel("X axis")
    ax.set_ylabel("Y axis")
    ax.set_zlabel(Zlabel)
    ax.set_title("2D plot of " + Zlabel)
    ax.figure.savefig(filename)
    plt.clf()
def nth_derivative_plotter(fn, n, x_min, x_max, filename):
    """Plot the n-th numerical derivative of fn over [x_min, x_max] and save it.

    NOTE(review): relies on scipy.misc.derivative, which was deprecated and
    removed in SciPy 1.12 — needs a replacement (e.g. a manual central
    difference) to run on current SciPy versions.
    """
    num_values = 1000
    x = np.linspace(x_min, x_max, num_values)
    fnv = np.vectorize(fn)
    # n-th order central finite difference with step 1e-3
    y = derivative(fnv,x,dx = 1e-3, n = n)
    strn = str(n)
    Xlabel = "X axis"
    # y-axis label renders as fn^(n)(x) via LaTeX superscript
    Ylabel = fn.__name__ + fr'$^{{({strn})}}$' + '(x)'
    Title = "nth derivative plotter (n = " + strn + ")"
    plt.plot(x,y)
    plt.xlabel(Xlabel)
    plt.ylabel(Ylabel)
    plt.title(Title)
    plt.savefig(filename)
    plt.clf()
def h(x):
    """Smooth cutoff helper: exp(-1/x^2) for x > 0, 0 otherwise."""
    return np.exp(-1.0 / (x * x)) if x > 0 else 0
def g(x):
    """Smooth step built from h: equals 1 for x <= 1 and 0 for x >= 2."""
    numer = h(2 - x)
    return numer / (numer + h(x - 1))
def b(x):
    """Even bump function: g applied to |x|."""
    return g(abs(x))
def sinc(x, y):
    """Radial sinc: sin(r)/r with r = sqrt(x^2 + y^2), defined as 1 at the origin."""
    if x == 0 and y == 0:
        return 1
    r = (x ** 2 + y ** 2) ** 0.5
    return np.sin(r) / r
if __name__ == '__main__':
    # Demo: plot the bump function b, the radial sinc surface, and the first
    # two numerical derivatives of b.
    fn_plot1d(b,-2,2,'fn1plot.png')
    fn_plot2d(sinc, -1.5*np.pi, 1.5*np.pi, -1.5*np.pi, 1.5*np.pi, 'fn2plot.png')
    nth_derivative_plotter(b, 1, -2, 2, 'bd_1.png')
    nth_derivative_plotter(b, 2, -2, 2, 'bd_2.png')
| CS251-Fall-2020/outlab4-190050013-190070020 | task4/task4.py | task4.py | py | 2,230 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.linspace",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.vectorize",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplo... |
16551248169 | import torch
import torch.nn as nn
class NegativeLabelLoss(nn.Module):
    """NLL-style loss over (1 - softmax) with small probabilities zeroed.

    Probabilities at or below `beta` are zeroed before taking log(1 - p),
    and the log term is scaled by `alpha`.
    https://www.desmos.com/calculator/9oaqcjayrw
    """

    def __init__(self, ignore_index=-100, reduction='mean', alpha=1.0, beta=0.8):
        super().__init__()
        self.softmax = nn.Softmax(dim=1)
        self.alpha = alpha
        self.beta = beta
        self.nll = nn.NLLLoss(ignore_index=ignore_index, reduction=reduction)

    def forward(self, logits, target):
        probs = self.softmax(logits)
        beta_t = torch.tensor([self.beta], dtype=logits.dtype).to(logits.device)
        zero_t = torch.tensor([0.0], requires_grad=True, dtype=logits.dtype).to(logits.device)
        # zero out probabilities that do not exceed beta
        probs = torch.where(probs <= beta_t, zero_t, probs)
        # clamp keeps log() finite when a probability is exactly 1
        complement = torch.clamp(1.0 - probs, min=1e-32)
        return self.nll(torch.log(complement) * self.alpha, target)
{
"api_name": "torch.nn.Module",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "torch.nn.Softmax",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_num... |
19262690162 | from __future__ import print_function
import json
import logging
import os
import datetime
import calendar
import sys
from collections import OrderedDict
from pokemongo_bot.base_dir import _base_dir
from pokemongo_bot.services.item_recycle_worker import ItemRecycler
'''
Helper class for updating/retrieving Inventory data
Interesting info and formulas:
https://drive.google.com/file/d/0B0TeYGBPiuzaenhUNE5UWnRCVlU/view
https://www.reddit.com/r/pokemongodev/comments/4w7mdg/combat_damage_calculation_formula_exactly/
'''
class FileIOException(Exception):
    """Exception type for inventory file I/O failures (raised elsewhere in the bot)."""
    pass
#
# Abstraction
class _StaticInventoryComponent(object):
# optionally load static data from file,
# dropping the data in a static variable named STATIC_DATA
STATIC_DATA_FILE = None
STATIC_DATA = None
def __init__(self):
if self.STATIC_DATA_FILE is not None:
self.init_static_data()
@classmethod
def init_static_data(cls):
if not hasattr(cls, 'STATIC_DATA') or cls.STATIC_DATA is None:
cls.STATIC_DATA = cls.process_static_data(
json.load(open(cls.STATIC_DATA_FILE)))
@classmethod
def process_static_data(cls, data):
# optional hook for processing the static data
# default is to use the data directly
return data
class _BaseInventoryComponent(_StaticInventoryComponent):
    """Base for components backed by a dict of parsed inventory entries."""
    TYPE = None  # base key name for items of this type
    ID_FIELD = None  # identifier field for items of this type

    def __init__(self):
        self._data = {}
        super(_BaseInventoryComponent, self).__init__()

    def parse(self, item):
        # optional hook for parsing the dict for this item
        # default is to use the dict directly
        return item

    def retrieve_data(self, inventory):
        """Extract and parse all entries of this component's TYPE."""
        assert self.TYPE is not None
        assert self.ID_FIELD is not None
        parsed = {}
        for entry in inventory:
            payload = entry['inventory_item_data']
            if self.TYPE in payload:
                raw = payload[self.TYPE]
                parsed[raw[self.ID_FIELD]] = self.parse(raw)
        return parsed

    def refresh(self, inventory):
        """Replace the cached data from a raw inventory response."""
        self._data = self.retrieve_data(inventory)

    def get(self, object_id):
        return self._data.get(object_id)

    def all(self):
        return list(self._data.values())
#
# Inventory Components
class Player(_BaseInventoryComponent):
    """Cached player stats (level, experience, capture/visit counters)."""
    TYPE = 'player_stats'

    def __init__(self, bot):
        self.bot = bot
        self._exp = None
        self._level = None
        self.next_level_xp = None
        self.pokemons_captured = None
        self.poke_stop_visits = None
        self.player_stats = None
        # NOTE(review): deliberately skips _BaseInventoryComponent.__init__
        # (so no self._data is created); state lives in the attributes above
        # and refresh()/retrieve_data are overridden — confirm intentional.
        super(_BaseInventoryComponent, self).__init__()

    @property
    def level(self):
        return self._level

    @level.setter
    def level(self, value):
        self._level = value

    @property
    def exp(self):
        return self._exp

    @exp.setter
    def exp(self, value):
        # if new exp is larger than or equal to next_level_xp
        if value >= self.next_level_xp:
            self.level = self._level + 1
            # increase next_level_xp to a big amount
            # will be fix on the next heartbeat
            self.next_level_xp += 10000000
        self._exp = value

    def refresh(self,inventory):
        """Re-read the player_stats entry from a raw inventory response."""
        self.player_stats = self.retrieve_data(inventory)

    def parse(self, item):
        """Copy the interesting fields of a raw player_stats dict onto self."""
        if not item:
            item = {}
        # next_level_xp must be assigned before exp: the exp setter reads it
        self.next_level_xp = item.get('next_level_xp', 0)
        self.exp = item.get('experience', 0)
        self.level = item.get('level', 0)
        self.pokemons_captured = item.get('pokemons_captured', 0)
        self.poke_stop_visits = item.get('poke_stop_visits', 0)

    def retrieve_data(self, inventory):
        """Find the player_stats entry in the inventory, parse it, return the raw dict."""
        ret = {}
        for item in inventory:
            data = item['inventory_item_data']
            if self.TYPE in data:
                item = data[self.TYPE]
                ret = item
                self.parse(item)
        return ret
class Candies(_BaseInventoryComponent):
    """Candy counts per pokemon family."""
    TYPE = 'candy'
    ID_FIELD = 'family_id'

    @classmethod
    def family_id_for(cls, pokemon_id):
        """Candy family id for a given pokemon id."""
        return Pokemons.candyid_for(pokemon_id)

    def get(self, pokemon_id):
        """Candy entry for the pokemon's family, creating an empty one on first access."""
        family_id = self.family_id_for(pokemon_id)
        return self._data.setdefault(family_id, Candy(family_id, 0))

    def parse(self, item):
        return Candy(item['family_id'], item.get('candy', 0))
class Pokedex(_BaseInventoryComponent):
    """Per-species pokedex entries (seen/captured flags, shiny flags)."""
    TYPE = 'pokedex_entry'
    ID_FIELD = 'pokemon_id'

    def seen(self, pokemon_id):
        """True once the species has a pokedex entry."""
        return pokemon_id in self._data

    def captured(self, pokemon_id):
        """True when the species was seen and captured at least once."""
        if not self.seen(pokemon_id):
            return False
        return self._data.get(pokemon_id, {}).get('times_captured', 0) > 0

    def shiny_seen(self, pokemon_id):
        """True if a shiny of this species was ever encountered."""
        entry = self._data.get(pokemon_id, {})
        return entry.get('encountered_shiny', False)

    def shiny_captured(self, pokemon_id):
        """True if a shiny of this species was ever captured."""
        entry = self._data.get(pokemon_id, {})
        return entry.get('captured_shiny', False)
class Item(object):
    """
    One item stack in the bag: id, resolved name, and current count.
    """

    def __init__(self, item_id, item_count):
        """
        Build an item from its id and quantity.

        :param item_id: ID of the item
        :type item_id: int
        :param item_count: Quantity of the item
        :type item_count: int
        """
        self.id = item_id
        self.name = Items.name_for(self.id)
        self.count = item_count

    def remove(self, amount):
        """
        Decrease the cached count only — the server-side inventory is NOT
        touched by this call.

        :param amount: Amount to remove
        :type amount: int
        :rtype: None
        """
        if amount > self.count:
            raise Exception('Tried to remove more {} than you have'.format(self.name))
        self.count -= amount

    def recycle(self, amount_to_recycle):
        """
        Recycle (discard) the given amount both on the server and in the
        cached inventory (the cache is updated only on server success).

        :param amount_to_recycle: The amount to recycle.
        :type amount_to_recycle: int
        :return: Result of the recycle task
        :rtype: worker_result.WorkerResult
        """
        if amount_to_recycle > self.count:
            raise Exception('Tried to remove more {} than you have'.format(self.name))
        recycler = ItemRecycler(_inventory.bot, self, amount_to_recycle)
        result = recycler.work()
        if recycler.is_recycling_success():
            self.remove(amount_to_recycle)
        return result

    def add(self, amount):
        """
        Increase the cached count; amount must be non-negative.

        :param amount: Amount to add
        :type amount: int
        :rtype: None
        """
        if amount < 0:
            raise Exception('Must add positive amount of {}'.format(self.name))
        self.count += amount

    def __str__(self):
        return self.name + " : " + str(self.count)
class Items(_BaseInventoryComponent):
    """Item bag cache plus static id->name data from data/items.json."""
    TYPE = 'item'
    ID_FIELD = 'item_id'
    STATIC_DATA_FILE = os.path.join(_base_dir, 'data', 'items.json')

    def parse(self, item_data):
        """
        Make an instance of an Item from raw item data.
        :param item_data: Item data to make an item from
        :return: Instance of the Item.
        :rtype: Item
        """
        item_id = item_data.get(Items.ID_FIELD, None)
        item_count = item_data['count'] if 'count' in item_data else 0
        return Item(item_id, item_count)

    def all(self):
        """
        Get EVERY Item from the cached inventory.
        :return: List of evey item in the cached inventory
        :rtype: list of Item
        """
        return list(self._data.values())

    def get(self, item_id):
        """
        Get ONE Item from the cached inventory.
        :param item_id: Item's ID to search for.
        :return: Instance of the item from the cached inventory
        :rtype: Item
        """
        return self._data.setdefault(item_id, Item(item_id, 0))

    @classmethod
    def name_for(cls, item_id):
        """
        Search the name for an item from its ID.
        :param item_id: Item's ID to search for.
        :return: Item's name.
        :rtype: str
        """
        return cls.STATIC_DATA[str(item_id)]

    @classmethod
    def get_space_used(cls):
        """
        Counts the space used in item inventory.
        :return: The space used in item inventory.
        :rtype: int
        """
        # NOTE(review): the count starts at 1 rather than 0 — presumably to
        # reserve one slot; confirm the intent before changing.
        space_used = 1
        for item_in_inventory in _inventory.items.all():
            space_used += item_in_inventory.count
        return space_used

    @classmethod
    def get_space_left(cls):
        """
        Compute the space left in item inventory.
        :return: The space left in item inventory. 0 if the player has more item than his item inventory can carry.
        :rtype: int
        """
        _inventory.retrieve_inventories_size()
        space_left = _inventory.item_inventory_size - cls.get_space_used()
        # Space left should never be negative. Returning 0 if the computed value is negative.
        return space_left if space_left >= 0 else 0

    @classmethod
    def has_space_for_loot(cls):
        """
        Returns a value indicating whether or not the item inventory has enough space to loot a fort
        :return: True if the item inventory has enough space; otherwise, False.
        :rtype: bool
        """
        max_number_of_items_looted_at_stop = 5
        return cls.get_space_left() >= max_number_of_items_looted_at_stop
class AppliedItem(object):
    """
    Representation of an applied item, like incense.
    """

    def __init__(self, item_id, expire_ms, applied_ms):
        """
        Representation of an applied item
        :param item_id: ID of the item
        :type item_id: int
        :param expire_ms: expire in ms
        :type expire_ms: in
        :param applied_ms: applied at
        :type applied_ms: int
        :return: An applied item
        :rtype: AppliedItemItem
        """
        self.id = item_id
        self.name = Items.name_for(self.id)
        self.applied_ms = applied_ms
        self.expire_ms = expire_ms

    def refresh(self,inventory):
        """Re-read this applied item's entry from a raw inventory response."""
        self.retrieve_data(inventory)

    def parse(self, item):
        """Copy fields from a raw applied-item dict onto self (empty-safe)."""
        if not item:
            item = {}
        # NOTE(review): reads key 'id' while other components use 'item_id' —
        # confirm against the actual applied_items payload shape.
        self.id = item.get('id', 0)
        self.name = Items.name_for(self.id)
        self.expire_ms = item.get('expire_ms', 0)
        self.applied_ms = item.get('applied_ms', 0)

    def retrieve_data(self, inventory):
        """Find this component's entry in the inventory and parse it.

        NOTE(review): references self.TYPE, which this class never defines —
        calling this would raise AttributeError; was AppliedItems.TYPE meant?
        """
        ret = {}
        for item in inventory:
            data = item['inventory_item_data']
            if self.TYPE in data:
                item = data[self.TYPE]
                ret = item
                self.parse(item)
        return ret

    def __str__(self):
        return self.name
class AppliedItems(_BaseInventoryComponent):
    """Currently applied items (e.g. incense); mirrors Items' lookup helpers."""
    TYPE='applied_items'
    ID_FIELD = 'item_id'
    STATIC_DATA_FILE = os.path.join(_base_dir, 'data', 'items.json')

    def all(self):
        """
        Get EVERY Item from the cached inventory.
        :return: List of evey item in the cached inventory
        :rtype: list of Item
        """
        return list(self._data.values())

    def get(self, item_id):
        """
        Get ONE Item from the cached inventory.
        :param item_id: Item's ID to search for.
        :return: Instance of the item from the cached inventory
        :rtype: Item
        """
        return self._data.setdefault(item_id, Item(item_id, 0))

    @classmethod
    def name_for(cls, item_id):
        """
        Search the name for an item from its ID.
        :param item_id: Item's ID to search for.
        :return: Item's name.
        :rtype: str
        """
        return cls.STATIC_DATA[str(item_id)]
class Pokemons(_BaseInventoryComponent):
    """Cached pokemon bag plus static per-species data (data/pokemon.json)."""
    TYPE = 'pokemon_data'
    ID_FIELD = 'id'
    STATIC_DATA_FILE = os.path.join(_base_dir, 'data', 'pokemon.json')

    @classmethod
    def process_static_data(cls, data):
        """Wrap raw species dicts in PokemonInfo and wire up evolution links."""
        data = [PokemonInfo(d) for d in data]
        # process evolution info
        for p in data:
            next_all = p.next_evolutions_all
            if len(next_all) <= 0:
                continue
            # only next level evolutions, not all possible
            p.next_evolution_ids = [idx for idx in next_all
                                    if data[idx-1].prev_evolution_id == p.id]
            # only final evolutions
            p.last_evolution_ids = [idx for idx in next_all
                                    if not data[idx-1].has_next_evolution]
            assert len(p.last_evolution_ids) > 0
        return data

    @classmethod
    def get_space_used(cls):
        """
        Counts the space used in pokemon inventory.
        :return: The space used in pokemon inventory.
        :rtype: int
        """
        return len(_inventory.pokemons.all_with_eggs())

    @classmethod
    def get_space_left(cls):
        """
        Compute the space left in pokemon inventory.
        :return: The space left in pokemon inventory.
        :rtype: int
        """
        _inventory.retrieve_inventories_size()
        space_left = _inventory.pokemon_inventory_size - cls.get_space_used()
        return space_left

    @classmethod
    def data_for(cls, pokemon_id):
        # type: (int) -> PokemonInfo
        # STATIC_DATA is a list indexed by (pokedex id - 1)
        return cls.STATIC_DATA[pokemon_id - 1]

    @classmethod
    def name_for(cls, pokemon_id):
        """Species name for a pokedex id."""
        return cls.data_for(pokemon_id).name

    @classmethod
    def candyid_for(cls, pokemon_id):
        """Candy family id for a pokedex id."""
        return cls.data_for(pokemon_id).candyid

    @classmethod
    def id_for(cls, pokemon_name):
        """Pokedex id for a (case-insensitive) species name."""
        # TODO: Use a better searching algorithm. This one is O(n)
        for data in cls.STATIC_DATA:
            if data.name.lower() == pokemon_name.lower():
                return data.id
        raise Exception('Could not find pokemon named {}'.format(pokemon_name))

    @classmethod
    def first_evolution_id_for(cls, pokemon_id):
        return cls.data_for(pokemon_id).first_evolution_id

    @classmethod
    def prev_evolution_id_for(cls, pokemon_id):
        return cls.data_for(pokemon_id).prev_evolution_id

    @classmethod
    def next_evolution_ids_for(cls, pokemon_id):
        return cls.data_for(pokemon_id).next_evolution_ids

    @classmethod
    def last_evolution_ids_for(cls, pokemon_id):
        return cls.data_for(pokemon_id).last_evolution_ids

    @classmethod
    def has_next_evolution(cls, pokemon_id):
        return cls.data_for(pokemon_id).has_next_evolution

    @classmethod
    def evolution_cost_for(cls, pokemon_id):
        return cls.data_for(pokemon_id).evolution_cost

    @classmethod
    def evolution_item_for(cls, pokemon_id):
        return cls.data_for(pokemon_id).evolution_item

    @classmethod
    def evolution_items_needed_for(cls, pokemon_id):
        return cls.data_for(pokemon_id).evolution_item_needed

    def parse(self, item):
        # eggs and pokemon share the same inventory TYPE; an 'is_egg' key
        # distinguishes them
        if 'is_egg' in item:
            return Egg(item)
        return Pokemon(item)

    def all(self):
        # by default don't include eggs in all pokemon (usually just
        # makes caller's lives more difficult)
        return [p for p in super(Pokemons, self).all() if not isinstance(p, Egg)]

    def all_with_eggs(self):
        # count pokemon AND eggs, since eggs are counted as bag space
        return super(Pokemons, self).all()

    def add(self, pokemon):
        """Insert a pokemon into the cache; its unique_id must be set and new."""
        if pokemon.unique_id <= 0:
            raise ValueError("Can't add a pokemon without id")
        if pokemon.unique_id in self._data:
            raise ValueError("Pokemon already present in the inventory")
        self._data[pokemon.unique_id] = pokemon

    def get_from_unique_id(self, pokemon_unique_id):
        """Fetch a cached pokemon by its unique id; raises if absent."""
        if pokemon_unique_id not in self._data:
            raise ValueError("Pokemon not present in the inventory")
        return self._data[pokemon_unique_id]

    def remove(self, pokemon_unique_id):
        """Drop a cached pokemon by its unique id; raises if absent."""
        if pokemon_unique_id not in self._data:
            raise ValueError("Pokemon not present in the inventory")
        self._data.pop(pokemon_unique_id)
#
# Static Components
class Types(_StaticInventoryComponent):
    """
    Types of attacks and pokemons
    See more information:
    https://i.redd.it/oy7lrixl8r9x.png
    https://www.reddit.com/r/TheSilphRoad/comments/4t8seh/pokemon_go_type_advantage_chart/
    https://github.com/jehy/Pokemon-Go-Weakness-calculator/blob/master/app/src/main/java/ru/jehy/pokemonweaknesscalculator/MainActivity.java#L31
    """
    STATIC_DATA_FILE = os.path.join(_base_dir, 'data', 'types.json')

    @classmethod
    def process_static_data(cls, data):
        # create instances
        ret = OrderedDict()
        for t in sorted(data, key=lambda x: x["name"]):
            name = str(t["name"])
            ret[name] = Type(name, t["effectiveAgainst"], t["weakAgainst"])

        # additional manipulations
        size = len(ret)
        by_effectiveness = {}
        by_resistance = {}
        # FIX: use .values() instead of the Python-2-only .itervalues(),
        # consistent with the rest of this module and Python 3 compatible
        for t in ret.values():  # type: Type
            t.attack_effective_against = [ret[name] for name in t.attack_effective_against]
            t.attack_weak_against = [ret[name] for name in t.attack_weak_against]
            # group types effective against, weak against specific types
            for l, d in (t.attack_effective_against, by_effectiveness), \
                        (t.attack_weak_against, by_resistance):
                for tt in l:
                    if tt not in d:
                        d[tt] = set()
                    d[tt].add(t)
            # calc average factor for damage of this type relative to all types
            t.rate = (size
                      + ((EFFECTIVENESS_FACTOR-1) * len(t.attack_effective_against))
                      - ((1-RESISTANCE_FACTOR) * len(t.attack_weak_against))) / size

        # set pokemon type resistance/weakness info
        for t in ret.values():  # type: Type
            t.pokemon_resistant_to = by_resistance[t]
            t.pokemon_vulnerable_to = by_effectiveness[t]

        return ret

    @classmethod
    def get(cls, type_name):
        # type: (Union[string, Type]) -> Type
        type_name = str(type_name)
        if type_name not in cls.STATIC_DATA:
            raise ValueError("Unknown type: {}".format(type_name))
        return cls.STATIC_DATA[type_name]

    @classmethod
    def all(cls):
        return cls.STATIC_DATA.values()

    @classmethod
    def rating(cls):
        """All types sorted by their average damage rate, best first."""
        return sorted(cls.all(), key=lambda x: x.rate, reverse=True)
class LevelToCPm(_StaticInventoryComponent):
    """
    Data for the CP multipliers at different levels
    See http://pokemongo.gamepress.gg/cp-multiplier
    See https://github.com/justinleewells/pogo-optimizer/blob/edd692d/data/game/level-to-cpm.json
    """
    STATIC_DATA_FILE = os.path.join(_base_dir, 'data', 'level_to_cpm.json')
    MAX_LEVEL = 40
    MAX_CPM = .0

    @classmethod
    def init_static_data(cls):
        super(LevelToCPm, cls).init_static_data()
        cls.MAX_CPM = cls.cp_multiplier_for(cls.MAX_LEVEL)
        assert cls.MAX_CPM > .0

    @classmethod
    def cp_multiplier_for(cls, level):
        """CP multiplier for a (possibly half-) level; the table holds one entry per half level."""
        index = int(2 * (level - 1))
        return cls.STATIC_DATA[index]

    @classmethod
    def level_from_cpm(cls, cp_multiplier):
        """Inverse lookup: the level whose CPM is closest to the given value."""
        table = cls.STATIC_DATA
        closest = min(range(len(table)), key=lambda i: abs(table[i] - cp_multiplier))
        return closest * 0.5 + 1
class _Attacks(_StaticInventoryComponent):
    """Base registry for attack (move) static data.

    Builds three lookups from the raw move list: BY_NAME (name -> Attack),
    BY_TYPE (type name -> attacks sorted by DPS descending, each annotated
    with rate_in_type), and BY_DPS (all attacks sorted by DPS descending).
    """
    BY_NAME = {}  # type: Dict[string, Attack]
    BY_TYPE = {}  # type: Dict[List[Attack]]
    BY_DPS = []  # type: List[Attack]

    @classmethod
    def process_static_data(cls, moves):
        ret = {}
        by_type = {}
        by_name = {}
        fast = cls is FastAttacks
        for attack in moves:
            attack = Attack(attack) if fast else ChargedAttack(attack)
            ret[attack.id] = attack
            by_name[attack.name] = attack
            attack_type = str(attack.type)
            if attack_type not in by_type:
                by_type[attack_type] = []
            by_type[attack_type].append(attack)

        # FIX: iterate the dict directly instead of the Python-2-only
        # .iterkeys(), keeping this module Python 3 compatible
        for t in by_type:
            attacks = sorted(by_type[t], key=lambda m: m.dps, reverse=True)
            min_dps = attacks[-1].dps
            max_dps = attacks[0].dps - min_dps
            if max_dps > .0:
                # normalize each attack's DPS within its type to [0, 1]
                for attack in attacks:  # type: Attack
                    attack.rate_in_type = (attack.dps - min_dps) / max_dps
            by_type[t] = attacks

        cls.BY_NAME = by_name
        cls.BY_TYPE = by_type
        cls.BY_DPS = sorted(ret.values(), key=lambda m: m.dps, reverse=True)
        return ret

    @classmethod
    def data_for(cls, attack_id):
        # type: (int) -> Attack
        if attack_id not in cls.STATIC_DATA:
            raise ValueError("Attack {} not found in {}".format(
                attack_id, cls.__name__))
        return cls.STATIC_DATA[attack_id]

    @classmethod
    def by_name(cls, name):
        # type: (string) -> Attack
        return cls.BY_NAME[name]

    @classmethod
    def list_for_type(cls, type_name):
        # type: (Union[string, Type]) -> List[Attack]
        """
        :return: Attacks sorted by DPS in descending order
        """
        return cls.BY_TYPE[str(type_name)]

    @classmethod
    def all(cls):
        return cls.STATIC_DATA.values()

    @classmethod
    def all_by_dps(cls):
        return cls.BY_DPS
class FastAttacks(_Attacks):
    """Static registry of fast moves, loaded from data/fast_moves.json."""
    STATIC_DATA_FILE = os.path.join(_base_dir, 'data', 'fast_moves.json')
class ChargedAttacks(_Attacks):
    """Static registry of charged moves, loaded from data/charged_moves.json."""
    STATIC_DATA_FILE = os.path.join(_base_dir, 'data', 'charged_moves.json')
#
# Instances
class Type(object):
    """An elemental type plus its attack/defense effectiveness relations."""

    def __init__(self, name, effective_against, weak_against):
        # type: (string, Iterable[Type], Iterable[Type]) -> None
        self.name = name
        # compact single-character code for the type (handy e.g. when
        # nicknaming pokemon by attack types): the first letter where it is
        # unique among all types, otherwise a suitable substitute
        one_char_codes = {
            'Bug': 'B',
            'Dark': 'K',
            'Dragon': 'D',
            'Electric': 'E',
            'Fairy': 'Y',
            'Fighting': 'T',
            'Fire': 'F',
            'Flying': 'L',
            'Ghost': 'H',
            'Grass': 'A',
            'Ground': 'G',
            'Ice': 'I',
            'Normal': 'N',
            'Poison': 'P',
            'Psychic': 'C',
            'Rock': 'R',
            'Steel': 'S',
            'Water': 'W',
        }
        self.as_one_char = one_char_codes[name]
        # attacks of this type are strong against / weak against these types
        self.attack_effective_against = set(effective_against)
        self.attack_weak_against = set(weak_against)
        # pokemon of this type resist / are vulnerable to these attack types
        self.pokemon_resistant_to = set()  # type: Set[Type]
        self.pokemon_vulnerable_to = set()  # type: Set[Type]
        # average factor for damage of this type relative to all types
        self.rate = 1.

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.name
class Candy(object):
    """Candy counter for a single pokemon family."""

    def __init__(self, family_id, quantity):
        self.type = Pokemons.name_for(family_id)
        self.quantity = quantity

    def consume(self, amount):
        """Spend candy; raises when there is not enough."""
        if amount > self.quantity:
            raise Exception('Tried to consume more {} candy than you have'.format(self.type))
        self.quantity -= amount

    def add(self, amount):
        """Gain candy; the amount must be non-negative."""
        if amount < 0:
            raise Exception('Must add positive amount of candy')
        self.quantity += amount
class Egg(object):
    """Inventory entry for an unhatched egg (counts toward bag space)."""

    def __init__(self, data):
        self._data = data

    def has_next_evolution(self):
        # eggs never evolve
        return False
class PokemonInfo(object):
    """
    Static information about a pokemon kind (species).

    Built from one entry of the static pokemon data file: types, capture
    and flee rates, base stats, attacks, precomputed movesets, maximum CP
    and the evolution family layout.
    """
    def __init__(self, data):
        self._data = data
        self.id = int(data["Number"])
        self.name = data['Name']  # type: str
        self.classification = data['Classification']  # type: str
        # prepare types
        self.type1 = Types.get(data['Type I'][0])
        self.type2 = None
        self.types = [self.type1]  # required (primary) type
        # at most one secondary type is taken, hence the break
        for t in data.get('Type II', []):
            self.type2 = Types.get(t)
            self.types.append(self.type2)  # second type
            break
        # base chance to capture pokemon
        self.capture_rate = data['CaptureRate']
        # chance of the pokemon to flee away
        self.flee_rate = data['FleeRate']
        # km needed for buddy reward
        self.buddy_distance_needed = data['BuddyDistanceNeeded']
        # prepare attacks (moves)
        # NOTE: despite the singular name, charged_attack is a list too
        self.fast_attacks = self._process_attacks()
        self.charged_attack = self._process_attacks(charged=True)
        # prepare movesets
        self.movesets = self._process_movesets()
        # Basic Values of the pokemon (identical for all pokemons of one kind)
        self.base_attack = data['BaseAttack']
        self.base_defense = data['BaseDefense']
        self.base_stamina = data['BaseStamina']
        # calculate maximum CP for the pokemon (best IVs, lvl 40)
        self.max_cp = _calc_cp(self.base_attack, self.base_defense,
                               self.base_stamina)
        #
        # evolutions info for this pokemon
        # id of the very first level evolution
        self.first_evolution_id = self.id
        # id of the previous evolution (one level only)
        self.prev_evolution_id = None
        # ids of all available previous evolutions in the family
        self.prev_evolutions_all = []
        if 'Previous evolution(s)' in data:
            ids = [int(e['Number']) for e in data['Previous evolution(s)']]
            self.first_evolution_id = ids[0]
            self.prev_evolution_id = ids[-1]
            self.prev_evolutions_all = ids
        # Number of candies for the next evolution (if possible)
        self.evolution_cost = 0
        # Next evolution doesn't need a special item
        self.evolution_item = None
        self.evolution_item_needed = 0
        # next evolution flag
        self.has_next_evolution = 'Next evolution(s)' in data \
            or 'Next Evolution Requirements' in data
        # ids of the last level evolutions
        self.last_evolution_ids = [self.id]
        # ids of the next possible evolutions (one level only)
        self.next_evolution_ids = []
        # candy family info from the static data
        self.candyid = int(data['Candy']['FamilyID'])
        self.candyName = (data['Candy']['Name'])
        self.next_evolutions_all = []
        # NOTE(review): has_next_evolution is True when EITHER key exists,
        # but the block below unconditionally reads BOTH
        # 'Next evolution(s)' and 'Next Evolution Requirements' — an entry
        # with only one of the two keys would raise KeyError here.
        # TODO confirm the static data always has both keys together.
        if self.has_next_evolution:
            ids = [int(e['Number']) for e in data['Next evolution(s)']]
            self.next_evolutions_all = ids
            self.evolution_cost = int(data['Next Evolution Requirements']['Amount'])
            if 'EvoItem' in data['Next Evolution Requirements']:
                self.evolution_item = int(data['Next Evolution Requirements']['EvoItem'])
                self.evolution_item_needed = int(data['Next Evolution Requirements']['EvoItemNeeded'])
    @property
    def family_id(self):
        # family id == id of the very first evolution in the chain
        return self.first_evolution_id
    @property
    def is_seen(self):
        return pokedex().seen(self.id)
    @property
    def is_captured(self):
        return pokedex().captured(self.id)
    def _process_movesets(self):
        # type: () -> List[Moveset]
        """
        Build all fast+charged move combinations, ranked by DPS.

        The optimal moveset is the combination of two moves, one quick move
        and one charge move, that deals the most damage over time.
        Because each quick move gains a certain amount of energy (different
        for different moves) and each charge move requires a different amount
        of energy to use, sometimes, a quick move with lower DPS will be
        better since it charges the charge move faster. On the same note,
        sometimes a charge move that has lower DPS will be more optimal since
        it may require less energy or it may last for a longer period of time.
        Attacker have STAB (Same-type attack bonus - x1.25) pokemon have the
        same type as attack. So we add it to the "Combo DPS" of the moveset.
        The defender attacks in intervals of 1 second for the first 2 attacks,
        and then in intervals of 2 seconds for the remainder of the attacks.
        This explains why we see two consecutive quick attacks at the beginning
        of the match. As a result, we add +2 seconds to the DPS calculation
        for defender DPS output.
        So to determine an optimal defensive moveset, we follow the same method
        as we did for optimal offensive movesets, but instead calculate the
        highest "Combo DPS" with an added 2 seconds to the quick move cool down.
        Note: critical hits have not yet been implemented in the game
        See http://pokemongo.gamepress.gg/optimal-moveset-explanation
        See http://pokemongo.gamepress.gg/defensive-tactics
        """
        # Prepare movesets: cartesian product of fast x charged moves
        movesets = []
        for fm in self.fast_attacks:
            for chm in self.charged_attack:
                movesets.append(Moveset(fm, chm, self.types, self.id))
        assert len(movesets) > 0
        # Calculate attack perfection for each moveset
        # (normalized 0..1 between the worst and best attacking moveset)
        movesets = sorted(movesets, key=lambda m: m.dps_attack)
        worst_dps = movesets[0].dps_attack
        best_dps = movesets[-1].dps_attack
        if best_dps > worst_dps:
            for moveset in movesets:
                current_dps = moveset.dps_attack
                moveset.attack_perfection = \
                    (current_dps - worst_dps) / (best_dps - worst_dps)
        # Calculate defense perfection for each moveset (same normalization)
        movesets = sorted(movesets, key=lambda m: m.dps_defense)
        worst_dps = movesets[0].dps_defense
        best_dps = movesets[-1].dps_defense
        if best_dps > worst_dps:
            for moveset in movesets:
                current_dps = moveset.dps_defense
                moveset.defense_perfection = \
                    (current_dps - worst_dps) / (best_dps - worst_dps)
        # best raw-DPS moveset first
        return sorted(movesets, key=lambda m: m.dps, reverse=True)
    def _process_attacks(self, charged=False):
        # type: (bool) -> List[Attack]
        """
        Resolve this kind's move names to Attack objects, best DPS first.
        Raises KeyError for a move name missing from the static move data.
        """
        key = 'Fast Attack(s)' if not charged else 'Special Attack(s)'
        moves_dict = (ChargedAttacks if charged else FastAttacks).BY_NAME
        moves = []
        for name in self._data[key]:
            if name not in moves_dict:
                raise KeyError('Unknown {} attack: "{}"'.format(
                    'charged' if charged else 'fast', name))
            moves.append(moves_dict[name])
        moves = sorted(moves, key=lambda m: m.dps, reverse=True)
        assert len(moves) > 0
        return moves
class Pokemon(object):
    """
    A concrete pokemon instance owned by the player.

    Wraps the raw inventory ``data`` dict and combines it with the static
    per-kind info (``Pokemons.data_for``) to expose CP/IV statistics,
    move/moveset information and evolution helpers.
    """
    def __init__(self, data):
        self._data = data
        # Unique ID for this particular Pokemon
        self.unique_id = data.get('id', 0)
        self.encounter_id = data.get('encounter_id')
        # Id of such pokemons in the pokedex
        self.pokemon_id = data['pokemon_id']
        # Static per-kind information
        self.static = Pokemons.data_for(self.pokemon_id)
        # Shiny information
        # BUG FIX: data.get('pokemon_display') may be None, which crashed
        # the original .get('shiny') call — guard it.
        self.display_data = data.get('pokemon_display')
        self.shiny = self.display_data.get('shiny', False) if self.display_data else False
        # self.form = self.display_data.get('form', )
        # Combat points value
        self.cp = data['cp']
        # Base CP multiplier, fixed at the catch time
        self.cp_bm = data['cp_multiplier']
        # Changeable part of the CP multiplier, increasing at power up
        self.cp_am = data.get('additional_cp_multiplier', .0)
        # Resulting CP multiplier
        self.cp_m = self.cp_bm + self.cp_am
        # Current pokemon level (half levels are normal values)
        self.level = LevelToCPm.level_from_cpm(self.cp_m)
        if 'level' not in self._data:
            self._data['level'] = self.level
        # Maximum health points
        self.hp_max = data['stamina_max']
        # Current health points
        self.hp = data.get('stamina', 0)
        assert 0 <= self.hp <= self.hp_max
        # Individual Values of this specific pokemon (different for each)
        self.iv_attack = data.get('individual_attack', 0)
        self.iv_defense = data.get('individual_defense', 0)
        self.iv_stamina = data.get('individual_stamina', 0)
        # Basic Values of the pokemon (identical for all pokemons of one kind)
        base_attack = self.static.base_attack
        base_defense = self.static.base_defense
        base_stamina = self.static.base_stamina
        self.name = self.static.name
        self.nickname_raw = data.get('nickname', '')
        # display name falls back to the kind name when no nickname is set
        self.nickname = self.nickname_raw or self.name
        self.in_fort = 'deployed_fort_id' in data
        if 'deployed_fort_id' in data:
            self.fort_id = data['deployed_fort_id']
        # BUG FIX: the original compared with "is 1", which relies on
        # CPython small-int caching; use an equality check instead.
        self.is_favorite = data.get('favorite', 0) == 1
        self.buddy_candy = data.get('buddy_candy_awarded', 0)
        self.is_bad = data.get('is_bad', False)
        self.buddy_distance_needed = self.static.buddy_distance_needed
        self.fast_attack = FastAttacks.data_for(data['move_1'])
        self.charged_attack = ChargedAttacks.data_for(data['move_2'])  # type: ChargedAttack
        # Individual Values (IV) perfection percent
        self.iv = self._compute_iv_perfection()
        # IV CP perfection - kind of IV perfection percent but calculated
        # using weight of each IV in its contribution to CP of the best
        # evolution of current pokemon
        # So it tends to be more accurate than simple IV perfection
        self.ivcp = self._compute_cp_perfection()
        # Exact value of current CP (not rounded)
        self.cp_exact = _calc_cp(
            base_attack, base_defense, base_stamina,
            self.iv_attack, self.iv_defense, self.iv_stamina, self.cp_m)
        # Percent of maximum possible CP
        self.cp_percent = self.cp_exact / self.static.max_cp
        # Moveset instance with calculated DPS and perfection percents
        self.moveset = self._get_moveset()

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.name

    def update_nickname(self, new_nickname):
        """Set a new nickname; empty string falls back to the kind name."""
        self.nickname_raw = new_nickname
        self.nickname = self.nickname_raw or self.name

    def can_evolve_now(self):
        """True when enough candies (and evolution items, if any) are held."""
        if self.evolution_item is None:
            return self.has_next_evolution() and \
                self.candy_quantity >= self.evolution_cost
        else:
            evo_items = items().get(self.evolution_item).count
            return self.has_next_evolution() and \
                self.candy_quantity >= self.evolution_cost and \
                evo_items >= self.evolution_items_needed

    def has_next_evolution(self):
        return self.static.has_next_evolution

    def has_seen_next_evolution(self):
        """True if any direct next evolution was already captured."""
        for pokemon_id in self.next_evolution_ids:
            if pokedex().captured(pokemon_id):
                return True
        return False

    @property
    def family_id(self):
        return self.static.family_id

    @property
    def first_evolution_id(self):
        return self.static.first_evolution_id

    @property
    def prev_evolution_id(self):
        return self.static.prev_evolution_id

    @property
    def next_evolution_ids(self):
        return self.static.next_evolution_ids

    @property
    def last_evolution_ids(self):
        return self.static.last_evolution_ids

    @property
    def candy_quantity(self):
        return candies().get(self.pokemon_id).quantity

    @property
    def evolution_cost(self):
        return self.static.evolution_cost

    @property
    def evolution_item(self):
        return self.static.evolution_item

    @property
    def evolution_items_needed(self):
        return self.static.evolution_item_needed

    @property
    def iv_display(self):
        # "attack/defense/stamina", e.g. "15/14/13"
        return '{}/{}/{}'.format(self.iv_attack, self.iv_defense, self.iv_stamina)

    def _compute_iv_perfection(self):
        """Simple IV perfection: sum of IVs over the 45 maximum, 2 decimals."""
        total_iv = self.iv_attack + self.iv_defense + self.iv_stamina
        iv_perfection = round((total_iv / 45.0), 2)
        return iv_perfection

    def _compute_cp_perfection(self):
        """
        CP perfect percent is more accurate than IV perfect
        We know attack plays an important role in CP, and different
        pokemons have different base value, that's means 15/14/15 is
        better than 14/15/15 for lot of pokemons, and if one pokemon's
        base def is more than base sta, 15/15/14 is better than 15/14/15.
        See https://github.com/jabbink/PokemonGoBot/issues/469
        So calculate CP perfection at final level for the best of the final
        evolutions of the pokemon.
        """
        variants = []
        iv_attack = self.iv_attack
        iv_defense = self.iv_defense
        iv_stamina = self.iv_stamina
        cp_m = LevelToCPm.MAX_CPM
        last_evolution_ids = self.last_evolution_ids
        for pokemon_id in last_evolution_ids:
            poke_info = Pokemons.data_for(pokemon_id)
            base_attack = poke_info.base_attack
            base_defense = poke_info.base_defense
            base_stamina = poke_info.base_stamina
            # calculate CP variants at maximum level
            worst_cp = _calc_cp(base_attack, base_defense, base_stamina,
                                0, 0, 0, cp_m)
            perfect_cp = _calc_cp(base_attack, base_defense, base_stamina,
                                  cp_multiplier=cp_m)
            current_cp = _calc_cp(base_attack, base_defense, base_stamina,
                                  iv_attack, iv_defense, iv_stamina, cp_m)
            cp_perfection = (current_cp - worst_cp) / (perfect_cp - worst_cp)
            variants.append(cp_perfection)
        # get best value (probably for the best evolution)
        cp_perfection = max(variants)
        return cp_perfection

    def _get_moveset(self):
        """Find this pokemon's moveset among the kind's precomputed ones."""
        move1 = self.fast_attack
        move2 = self.charged_attack
        movesets = self.static.movesets
        current_moveset = None
        for moveset in movesets:  # type: Moveset
            if moveset.fast_attack == move1 and moveset.charged_attack == move2:
                current_moveset = moveset
                break
        if current_moveset is None:
            # unknown combination: log it and build an ad-hoc moveset
            error = "Unexpected moveset [{}, {}] for #{} {}," \
                    " please update info in pokemon.json and create issue/PR" \
                .format(move1, move2, self.pokemon_id, self.name)
            logging.getLogger(type(self).__name__).error(error)
            current_moveset = Moveset(
                move1, move2, self.static.types, self.pokemon_id)
        return current_moveset
class Attack(object):
    """
    A fast (quick) pokemon attack built from one entry of the static
    moves data.  ``ChargedAttack`` specializes this for charged moves.
    """

    def __init__(self, data):
        self.id = data['id']
        self.name = data['name']
        self.type = Types.get(data['type'])
        self.damage = data['damage']
        # the raw duration is in milliseconds; store seconds
        self.duration = data['duration'] / 1000.0
        # energy gained per use (fast) / energy cost per use (charged)
        self.energy = data['energy']
        # damage per second, recomputed here for full precision
        self.dps = self.damage / self.duration
        # how good this attack is within its own type, 0..1
        self.rate_in_type = .0

    @property
    def is_charged(self):
        return False

    @property
    def damage_with_stab(self):
        # damage including the Same-Type Attack Bonus
        return self.damage * STAB_FACTOR

    @property
    def dps_with_stab(self):
        # DPS including the Same-Type Attack Bonus
        return self.dps * STAB_FACTOR

    @property
    def energy_per_second(self):
        return self.energy / self.duration

    @property
    def effective_against(self):
        return self.type.attack_effective_against

    @property
    def weak_against(self):
        return self.type.attack_weak_against

    @property
    def dodge_window(self):
        # TODO: Attack Dodge Window
        return NotImplemented

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.name
class ChargedAttack(Attack):
    """A charged (special) attack; here ``energy`` is the cost per use."""
    def __init__(self, data):
        super(ChargedAttack, self).__init__(data)
    @property
    def is_charged(self):
        return True
class Moveset(object):
    """
    One fast + charged move combination and its derived DPS figures.

    ``dps`` is the raw combined damage-per-second, ``dps_attack`` adds the
    STAB bonus, ``dps_defense`` models the defender's slower attack rhythm.
    Perfection percents are filled in later by
    PokemonInfo._process_movesets().
    """
    def __init__(self, fm, chm, pokemon_types=(), pokemon_id=-1):
        # type: (Attack, ChargedAttack, List[Type], int) -> None
        # resolve types from the static data when only an id was given
        if len(pokemon_types) <= 0 < pokemon_id:
            pokemon_types = Pokemons.data_for(pokemon_id).types
        self.pokemon_id = pokemon_id
        self.fast_attack = fm
        self.charged_attack = chm
        # See Pokemons._process_movesets()
        # See http://pokemongo.gamepress.gg/optimal-moveset-explanation
        # See http://pokemongo.gamepress.gg/defensive-tactics
        # Model a combo of 100 fast moves plus however many charged moves
        # that much energy can pay for.
        fm_number = 100 # for simplicity we use 100
        fm_energy = fm.energy * fm_number
        fm_damage = fm.damage * fm_number
        fm_secs = fm.duration * fm_number
        # Defender attacks in intervals of 1 second for the
        # first 2 attacks, and then in intervals of 2 seconds
        # So add 1.95 seconds to the quick move cool down for defense
        # 1.95 is something like an average here
        # TODO: Do something better?
        fm_defense_secs = (fm.duration + 1.95) * fm_number
        # NOTE(review): under Python 2 this is integer (floor) division when
        # both energies are ints; under Python 3 it is true division — the
        # resulting DPS values differ between the two.  TODO confirm the
        # intended interpreter/behavior.
        chm_number = fm_energy / chm.energy
        chm_damage = chm.damage * chm_number
        chm_secs = chm.duration * chm_number
        damage_sum = fm_damage + chm_damage
        # raw Damage-Per-Second for the moveset
        self.dps = damage_sum / (fm_secs + chm_secs)
        # average DPS for defense
        self.dps_defense = damage_sum / (fm_defense_secs + chm_secs)
        # apply STAB (Same-type attack bonus)
        if fm.type in pokemon_types:
            fm_damage *= STAB_FACTOR
        if chm.type in pokemon_types:
            chm_damage *= STAB_FACTOR
        # DPS for attack (counting STAB)
        self.dps_attack = (fm_damage + chm_damage) / (fm_secs + chm_secs)
        # Moveset perfection percent for attack and for defense
        # Calculated for current pokemon kind only, not between all pokemons
        # So 100% perfect moveset can be weak if pokemon is weak (e.g. Caterpie)
        self.attack_perfection = .0
        self.defense_perfection = .0
        # TODO: True DPS for real combat (floor(Attack/200 * MovePower * STAB) + 1)
        # See http://pokemongo.gamepress.gg/pokemon-attack-explanation
    def __str__(self):
        return '[{}, {}]'.format(self.fast_attack, self.charged_attack)
    def __repr__(self):
        return '[{}, {}]'.format(self.fast_attack, self.charged_attack)
class Inventory(object):
    """
    The player's complete cached inventory: pokedex, candies, items,
    pokemons and player stats, refreshed from the server and mirrored
    into a per-user json file for the web UI.
    """
    def __init__(self, bot):
        self.bot = bot
        self.last_holo_timestamp_ms = 0
        self.last_timestamp_ms = 0
        self.pokedex = Pokedex()
        self.candy = Candies()
        self.items = Items()
        self.applied_items = AppliedItems()
        self.pokemons = Pokemons()
        self.player = Player(self.bot)  # include inventory inside Player?
        self.egg_incubators = None
        self.refresh()
        # inventory sizes are lazily fetched by retrieve_inventories_size()
        self.item_inventory_size = None
        self.pokemon_inventory_size = None

    def refresh(self, inventory=None):
        """
        Re-read the inventory and push it into all sub-inventories.

        :param inventory: Optional pre-fetched inventory response; when
            None the holo inventory is requested from the API.
        """
        if inventory is None:
            request = self.bot.api.create_request()
            request.get_holo_inventory(last_timestamp_ms=self.last_timestamp_ms)
            inventory = request.call()
            if 'inventory_delta' not in inventory['responses']['GET_HOLO_INVENTORY']:
                self.bot.logger.info("No player information found, possiblity of temp ban")
                sys.exit(0)
            else:
                self.last_holo_timestamp_ms = (inventory['responses']
                                               ['GET_HOLO_INVENTORY']
                                               ['inventory_delta']
                                               .get('new_timestamp_ms', 0))
        else:
            request = self.bot.api.create_request()
            request.get_inventory(last_timestamp_ms=self.last_timestamp_ms)
            inventory = request.call()
            # (removed a debug print that dumped the whole inventory)
            self.last_timestamp_ms = (inventory['responses']
                                      ['GET_INVENTORY']
                                      ['inventory_delta']
                                      .get('new_timestamp_ms', 0))
        if 'GET_HOLO_INVENTORY' in inventory['responses']:
            inventory = inventory['responses']['GET_HOLO_INVENTORY']['inventory_delta']['inventory_items']
        else:
            inventory = inventory['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']
        for i in (self.pokedex, self.candy, self.items, self.pokemons, self.player):
            i.refresh(inventory)
        self.egg_incubators = [x["inventory_item_data"] for x in inventory if "egg_incubators" in x["inventory_item_data"]]
        self.update_web_inventory()

    def init_inventory_outfile(self):
        """Create an empty web inventory file for this user if missing."""
        web_inventory = os.path.join(_base_dir, "web", "inventory-%s.json" % self.bot.config.username)
        if not os.path.exists(web_inventory):
            self.bot.logger.info('No inventory file %s found. Creating a new one' % web_inventory)
            json_inventory = []
            with open(web_inventory, "w") as outfile:
                json.dump(json_inventory, outfile)

    def update_web_inventory(self):
        """Serialize the cached inventory and write it to the web json file."""
        web_inventory = os.path.join(_base_dir, "web", "inventory-%s.json" % self.bot.config.username)
        if not os.path.exists(web_inventory):
            self.init_inventory_outfile()
        json_inventory = self.jsonify_inventory()
        try:
            with open(web_inventory, "w") as outfile:
                json.dump(json_inventory, outfile)
        except (IOError, ValueError) as e:
            self.bot.logger.info('[x] Error while opening inventory file for write: %s' % e, 'red')
        except Exception:
            # BUG FIX: the original raised with
            # "Unexpected error writing to {}".web_inventory — an
            # AttributeError on str — instead of calling .format().
            raise FileIOException("Unexpected error writing to {}".format(web_inventory))

    def jsonify_inventory(self):
        """Build a json-serializable list mirroring the server's item layout."""
        json_inventory = []
        json_inventory.append({"inventory_item_data": {"player_stats": self.player.player_stats}})
        for pokedex in self.pokedex.all():
            json_inventory.append({"inventory_item_data": {"pokedex_entry": pokedex}})
        for family_id, candy in self.candy._data.items():
            json_inventory.append({"inventory_item_data": {"candy": {"family_id": family_id, "candy": candy.quantity}}})
        for item_id, item in self.items._data.items():
            json_inventory.append({"inventory_item_data": {"item": {"item_id": item_id, "count": item.count}}})
        for pokemon in self.pokemons.all_with_eggs():
            json_inventory.append({"inventory_item_data": {"pokemon_data": pokemon._data}})
        for inc in self.egg_incubators:
            json_inventory.append({"inventory_item_data": inc})
        # applied-items serialization is intentionally disabled upstream
        return json_inventory

    def retrieve_inventories_size(self):
        """
        Retrieves the item inventory size
        :return: Nothing.
        :rtype: None
        """
        # TODO: Force to update it if the player upgrades its size
        if self.item_inventory_size is None or self.pokemon_inventory_size is None:
            request = self.bot.api.create_request()
            request.get_player()
            player_data = request.call()['responses']['GET_PLAYER']['player_data']
            self.item_inventory_size = player_data['max_item_storage']
            self.pokemon_inventory_size = player_data['max_pokemon_storage']
#
# Other
# STAB (Same-type attack bonus)
# Factor applied to attack of the same type as pokemon
STAB_FACTOR = 1.2
# Factor applied to attack when it's effective against defending pokemon type
EFFECTIVENESS_FACTOR = 1.4
# Factor applied to attack when it's weak against defending pokemon type
RESISTANCE_FACTOR = 0.714
# Factor applied when the defending type resists the attack even more strongly
IMMUNITY_FACTOR = 0.51
# Module-level singleton set by init_inventory() and read by the accessors below
_inventory = None # type: Inventory
def _calc_cp(base_attack, base_defense, base_stamina,
iv_attack=15, iv_defense=15, iv_stamina=15,
cp_multiplier=.0):
"""
CP calculation
CP = (Attack * Defense^0.5 * Stamina^0.5 * CP_Multiplier^2) / 10
CP = (BaseAtk+AtkIV) * (BaseDef+DefIV)^0.5 * (BaseStam+StamIV)^0.5 * Lvl(CPScalar)^2 / 10
See https://www.reddit.com/r/TheSilphRoad/comments/4t7r4d/exact_pokemon_cp_formula/
See https://www.reddit.com/r/pokemongodev/comments/4t7xb4/exact_cp_formula_from_stats_and_cpm_and_an_update/
See http://pokemongo.gamepress.gg/pokemon-stats-advanced
See http://pokemongo.gamepress.gg/cp-multiplier
See http://gaming.stackexchange.com/questions/280491/formula-to-calculate-pokemon-go-cp-and-hp
:param base_attack: Pokemon BaseAttack
:param base_defense: Pokemon BaseDefense
:param base_stamina: Pokemon BaseStamina
:param iv_attack: Pokemon IndividualAttack (0..15)
:param iv_defense: Pokemon IndividualDefense (0..15)
:param iv_stamina: Pokemon IndividualStamina (0..15)
:param cp_multiplier: CP Multiplier (0.79030001 is max - value for level 40)
:return: CP as float
"""
assert base_attack > 0
assert base_defense > 0
assert base_stamina > 0
if cp_multiplier <= .0:
cp_multiplier = LevelToCPm.MAX_CPM
assert cp_multiplier > .0
return (base_attack + iv_attack) \
* ((base_defense + iv_defense)**0.5) \
* ((base_stamina + iv_stamina)**0.5) \
* (cp_multiplier ** 2) / 10
# Initialize static data in the right order: types and the level->CPm
# table first, then both attack registries, and finally Pokemons (whose
# entries reference types and attacks during construction).
Types() # init Types
LevelToCPm() # init LevelToCPm
FastAttacks() # init FastAttacks
ChargedAttacks() # init ChargedAttacks
Pokemons() # init Pokemons
#
# Usage helpers
# TODO : Complete the doc
# Only return types have been filled in for now; they let the IDE suggest methods of each class.
def init_inventory(bot):
    """
    Initialises the cached inventory, retrieves data from the server.
    :param bot: Instance of the bot.
    :type bot: pokemongo_bot.PokemonGoBot
    :return: Nothing.
    :rtype: None
    """
    global _inventory
    _inventory = Inventory(bot)
def refresh_inventory(data=None):
    """
    Refreshes the cached inventory, retrieves data from the server.
    :param data: Optional pre-fetched inventory response to parse instead
        of calling the API.
    :return: Nothing.
    :rtype: None
    """
    try:
        _inventory.refresh(data)
    except AttributeError:
        # _inventory is still None when init_inventory() was never called
        print('_inventory was not initialized')
def jsonify_inventory():
    """
    Serializes the cached inventory.
    :return: The json-compatible inventory list, or [] when the cache was
        never initialized.
    :rtype: list
    """
    try:
        return _inventory.jsonify_inventory()
    except AttributeError:
        print('_inventory was not initialized')
        return []
def update_web_inventory():
    """
    Writes the cached inventory to the per-user web inventory file.

    CONSISTENCY FIX: guard against an uninitialized cache the same way
    refresh_inventory() and jsonify_inventory() do, instead of letting
    an AttributeError escape.
    :return: Nothing.
    :rtype: None
    """
    try:
        _inventory.update_web_inventory()
    except AttributeError:
        print('_inventory was not initialized')
def get_item_inventory_size():
    """
    Access to the Item inventory size.
    :return: Item inventory size.
    :rtype: int
    """
    _inventory.retrieve_inventories_size()
    return _inventory.item_inventory_size
def get_pokemon_inventory_size():
    """
    Access to the Pokemon inventory size.
    :return: Pokemon inventory size.
    :rtype: int
    """
    _inventory.retrieve_inventories_size()
    return _inventory.pokemon_inventory_size
def pokedex():
    """
    Access to the cached pokedex.
    :return: Instance of the cached pokedex.
    :rtype: Pokedex
    """
    # Are new pokemons added to the pokedex ?
    return _inventory.pokedex
def player():
    """
    Access to the cached player state.
    :return: Instance of the cached player.
    :rtype: Player
    """
    return _inventory.player
def candies():
    """
    Access to the cached candy inventory.
    :return: Instance of the cached candy inventory.
    :rtype: Candies
    """
    return _inventory.candy
def pokemons():
    """
    Access to the cached pokemon inventory.
    :return: Instance of the cached pokemon inventory.
    :rtype: Pokemons
    """
    return _inventory.pokemons
def items():
    """
    Access to the cached item inventory.
    :return: Instance of the cached item inventory.
    :rtype: Items
    """
    return _inventory.items
def applied_items():
    """
    Access to the cached applied item inventory.
    :return: Instance of the cached applied item inventory.
    :rtype: AppliedItems
    """
    return _inventory.applied_items
def types_data():
    """
    Access to the static types data.
    :return: The Types registry class.
    :rtype: Types
    """
    return Types
def levels_to_cpm():
    """
    Access to the static level -> CP-multiplier table.
    :return: The LevelToCPm registry class.
    :rtype: LevelToCPm
    """
    return LevelToCPm
def fast_attacks():
    """
    Access to the static fast attacks data.
    :return: The FastAttacks registry class.
    :rtype: FastAttacks
    """
    return FastAttacks
def charged_attacks():
    """
    Access to the static charged attacks data.
    :return: The ChargedAttacks registry class.
    :rtype: ChargedAttacks
    """
    return ChargedAttacks
| PokemonGoF/PokemonGo-Bot | pokemongo_bot/inventory.py | inventory.py | py | 53,475 | python | en | code | 3,815 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pokemongo_bot.services.item_recycle_worker.ItemRecycler",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 254,
"usage_type": "call"
},
{
"... |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: lishuang
@description: Predict stock prices with an LSTM network.
"""
import os
from itertools import chain
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tensorflow.keras.layers import Dense, Dropout, LSTM
from tensorflow.keras.models import load_model, Sequential
# length of the input window (past time steps fed to the LSTM)
TIME_STEPS_IN = 3
# number of future time steps predicted per sample
TIME_STEPS_OUT = 3
EPOCHS = 300
BATCH_SIZE = 100
def series_to_supervised(series, n_in=1, n_out=1, drop_nan=True):
    """
    Frame a time series as a supervised-learning dataset.

    Each output row pairs a window of past observations (the inputs) with
    the current/future observations (the targets).

    :param series: observation sequence (list or 2-D array-like)
    :param n_in: number of lag steps used as input X, in [1, len(data)], default 1
    :param n_out: number of steps used as output y, in [0, len(data)-1], default 1
    :param drop_nan: whether to drop rows containing NaN (the incomplete
        windows at the edges), default True
    :return: pandas DataFrame suitable for supervised learning, with
        columns named ``var<j>(t-<i>)`` ... ``var<j>(t+<i>)``
    """
    # isinstance instead of the original `type(series) is list` check
    n_vars = 1 if isinstance(series, list) else series.shape[1]
    df = pd.DataFrame(series)
    cols, names = [], []
    # input sequence (t-n, ..., t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += [f'var{j + 1}(t-{i})' for j in range(n_vars)]
    # forecast sequence (t, t+1, ..., t+n-1)
    for i in range(n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += [f'var{j + 1}(t)' for j in range(n_vars)]
        else:
            names += [f'var{j + 1}(t+{i})' for j in range(n_vars)]
    # stitch all shifted frames together side by side
    agg = pd.concat(cols, axis=1)
    agg.columns = names
    if drop_nan:
        agg.dropna(inplace=True)
    return agg
def get_train_set(dataset, time_steps_in=1, time_steps_out=1):
    """
    Convert raw observations into supervised-learning arrays for the LSTM.

    :param dataset: raw observation sequence (one feature per row)
    :param time_steps_in: input window length
    :param time_steps_out: prediction horizon length
    :return: (train_x, train_y) where train_x has shape
        [samples, time_steps_in, 1] as required by the LSTM layer
    """
    # (removed two debug print() calls that dumped whole arrays)
    train_dataset = np.array(dataset)
    reframed_train_dataset = np.array(
        series_to_supervised(train_dataset, time_steps_in, time_steps_out).values)
    # last `time_steps_out` columns are the targets, the rest is the input window
    train_x = reframed_train_dataset[:, :-time_steps_out]
    train_y = reframed_train_dataset[:, -time_steps_out:]
    # reshape into the LSTM's expected format: [samples, time steps, features]
    train_x = train_x.reshape(train_x.shape[0], time_steps_in, 1)
    return train_x, train_y
def plot_img(source_dataset, train_predict):
    """
    Plot the actual price series against the model's predictions.
    :param source_dataset: original observations; the last column is plotted
    :param train_predict: predicted values
    :return: Nothing (shows the figure).
    """
    plt.figure(figsize=(24, 8))
    # actual data in blue
    plt.plot(source_dataset[:, -1], c='b', label='actual')
    # predictions in green
    plt.plot([_ for _ in train_predict], c='g', label='predict')
    plt.legend()
    plt.show()
def lstm_model(source_dataset, train_data, label_data, epochs, batch_size, time_steps_out):
    """
    Build (or load) the LSTM model, train it, save it, and plot results.

    :param source_dataset: original observations, used for the final plot
    :param train_data: input windows, shape [samples, time steps, 1]
    :param label_data: target values, shape [samples, time_steps_out]
    :param epochs: training epochs
    :param batch_size: training batch size
    :param time_steps_out: prediction horizon (output layer width)
    """
    if os.path.exists('data/model.h5'):
        model = load_model('data/model.h5')
    else:
        model = Sequential()
        # layer 1: 128 LSTM units, returning the full sequence
        model.add(
            LSTM(128, return_sequences=True, activation='relu', input_shape=(train_data.shape[1], train_data.shape[2]))
        )
        # layer 2: 128 LSTM units, returning only the last output
        model.add(LSTM(128, return_sequences=False))
        model.add(Dropout(0.5))
        # regression output, hence a linear activation
        model.add(Dense(time_steps_out, activation='linear'))
        model.compile(loss='mean_squared_error', optimizer='adam')
    # train (verbose=2: one log line per epoch; 1: progress bar; 0: silent)
    res = model.fit(train_data, label_data, batch_size, epochs, verbose=2, shuffle=False)
    # BUG FIX: save AFTER training — the original called model.save()
    # before fit(), persisting an untrained model.
    model.save('./data/model.h5', overwrite=True)
    # model prediction
    train_predict = model.predict(train_data)
    plt.plot(res.history['loss'], label='train')
    plt.show()
    # summary() prints itself; the original wrapped it in print(), emitting "None"
    model.summary()
    plot_img(source_dataset, train_predict)
if __name__ == '__main__':
    # Shanghai index daily data; headers are Chinese, hence the GB2312 encoding
    data = pd.read_csv('data/shanghai_index_1990_12_19_to_2020_03_12.csv', encoding='GB2312')
    # rename the Chinese columns ('日期' = date, '收盘价' = closing price)
    data.rename(columns={'日期': 'Date', '收盘价': 'Price'}, inplace=True)
    data_set = data[['Price']].values.astype('float64')
    print(data_set)
    # frame as supervised-learning samples
    train_X, label_y = get_train_set(data_set, TIME_STEPS_IN, TIME_STEPS_OUT)
    # train and predict with the LSTM
    lstm_model(data_set, train_X, label_y, EPOCHS, BATCH_SIZE, TIME_STEPS_OUT)
| TatenLee/machine-learning | bi/core/l8/stock_lstm.py | stock_lstm.py | py | 4,659 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_nu... |
from bs4 import BeautifulSoup
import re
import requests
# Total-enrollment buckets used by the search form:
# option index -> [min, max) number of enrolled students.
# NOTE(review): bs4/re/requests are unused in this part of the file —
# presumably used further down; verify before removing.
ENROLLMENT_BOUNDS = {
    0: [0, 5000],
    1: [5000, 15000],
    2: [15000, 30000],
    3: [30000, 10000000],
}
def get_college_basic(uni_id_list):
    """
    Build a SQL query selecting basic fields for the given universities,
    returned in the same order as the ids in ``uni_id_list``.
    """
    conditions = [f'u.university_id = {uid}' for uid in uni_id_list]
    # OR together one filter per requested id
    where_str = ' or '.join(conditions)
    # CASE expression ranking each row by its position in the input list
    case_parts = ['ORDER BY (CASE']
    for rank, condition in enumerate(conditions, start=1):
        case_parts.append(f'WHEN {condition} THEN {rank}')
    order_by_str = ' '.join(case_parts) + ' END) ASC'
    columns = "u.university_id, u.name, u.city, u.state, u.website, u.campus_location, u.total_enrollment"
    query = f'SELECT {columns} FROM university u WHERE {where_str} {order_by_str};'
    print(query)
    return query
def get_college(uni_id):
    """
    Build the detail queries for a single university.

    Returns ``(info_query, majors_query)``: the first joins the university
    row with its school/admission/financial/diversity stats tables, the
    second lists the university's academic program names.
    """
    select_clause = "SELECT u.university_id, u.name, u.city, u.state, u.website, u.campus_location, u.total_enrollment, si.control, si.religious, si.accepts_ap_credit, si.study_abroad, si.offers_rotc, si.has_football, si.has_basketball, si.ncaa_member, si.retention_rate, si.graduation_rate, adm.total_applicants, adm.total_admitted, adm.admission_rate, adm.male_applicants, adm.female_applicants, adm.male_admitted, adm.female_admitted, adm.sat_rw_25, adm.sat_rw_75, adm.sat_math_25, adm.sat_math_75, adm.act_25, adm.act_75, fin.in_state_price, fin.out_of_state_price, fin.average_price_after_aid, fin.percent_given_aid, di.percent_american_indian_native_alaskan, di.percent_asian, di.percent_hawaiian_pacific_islander, di.percent_black, di.percent_white, di.percent_hispanic, di.percent_other, di.percent_two_races"
    from_clause = "FROM university as u, school_info as si, admission_stats as adm, financial_stats as fin, diversity_stats as di"
    # join every stats table on the requested university id
    where_clause = (
        f"WHERE u.university_id = {uni_id}"
        " AND u.university_id = si.university_id"
        " AND u.university_id = adm.university_id"
        " AND u.university_id = fin.university_id"
        " AND u.university_id = di.university_id"
    )
    info_query = "\n".join([select_clause, from_clause, where_clause]) + ";"
    majors_query = f"SELECT program_name FROM academic_programs WHERE {uni_id} = university_id;"
    return info_query, majors_query
def generate_query(params):
    """Build the ranked college-search query from the submitted quiz params.

    *params* is a Django QueryDict-like object (``get``/``getlist``).
    Returns ``(query, order)`` where *query* selects the basic university
    columns plus four per-category scores (location / academics / finance /
    other) and *order* lists the ORDER BY score columns, highest user
    priority first. Each category rank 1..5 becomes weight ``6 - rank``.
    """
    state = params.get('state')
    state_location = int(params.get('state_location'))
    campus_location = params.getlist('campus_location')
    size = params.getlist('enrollment')
    majors = params.getlist('study-fields')
    sat_math = int(params.get('sat_math'))
    sat_reading = int(params.get('sat_reading'))
    act = int(params.get('act'))
    tuition = int(params.get('tuition'))
    financial_aid = int(params.get('financial-aid'))
    religious = params.get('religious')
    ap_credit = params.get('ap_credit')
    study_abroad = params.get('study_abroad')
    offers_rotc = params.get('offers_rotc')
    ncaa = params.get('ncaa')
    # Multiply each category score by this much (rank 1 -> weight 5, ...).
    location_mult = 6 - int(params.get('location_rank'))
    academics_mult = 6 - int(params.get('academics_rank'))
    finance_mult = 6 - int(params.get('finance_rank'))
    other_mult = 6 - int(params.get('other_rank'))
    # Initial SQL strings
    where_str = "WHERE u.university_id = si.university_id AND u.university_id = adm.university_id AND u.university_id = fin.university_id AND u.university_id = di.university_id"
    group_str = "GROUP BY u.university_id"
    select_str = "SELECT u.university_id, u.name, u.city, u.state, u.website, u.campus_location, u.total_enrollment"
    majors_query = ""
    if majors:
        # Subquery counting how many of the requested CIP codes each
        # school offers; joined in as `maj`.
        majors_string = "(" + ",".join(majors) + ")"
        majors_query = f"""
    , (SELECT uni.university_id, COUNT(a.cip_code) as majors_count
       FROM university as uni, academic_programs as a
       WHERE uni.university_id = a.university_id AND a.cip_code IN {majors_string}
       GROUP BY uni.university_id
       ORDER BY majors_count
      ) as maj
    """
        where_str = f"{where_str} AND u.university_id = maj.university_id"
    from_str = f"FROM university as u, school_info as si, admission_stats as adm, financial_stats as fin, diversity_stats as di {majors_query}"
    # State selection: 0 = same state preferred, 1 = same state strongly
    # preferred (x3, also switches tuition to in-state below), 2 = neutral.
    location_order = []
    if state_location == 0:
        location_order.append(f"(SUM(u.state = '{state}'))")
    elif state_location == 1:
        location_order.append(f"(SUM(u.state = '{state}') * 3)")
    elif state_location == 2:
        location_order.append(f"SUM(u.state = '{state}')")
    # Campus location and enrollment
    if campus_location:
        location_str = "('" + "', '".join(campus_location) + "')"
        location_order.append(f"SUM(u.campus_location IN {location_str})")
    if size:
        lower_bound = ENROLLMENT_BOUNDS[int(min(size))][0]
        upper_bound = ENROLLMENT_BOUNDS[int(max(size))][1]
        location_order.append(f"SUM(u.total_enrollment >= {lower_bound})")
        location_order.append(f"SUM(u.total_enrollment <= {upper_bound})")
    location_str = "((" + " + ".join(location_order) + \
        f") * {location_mult}) as location_score"
    if location_order:
        select_str = f"{select_str}, {location_str}"
    else:
        select_str = f"{select_str}, 0 as location_score"
    # SAT/ACT Scores: count schools whose 25th-percentile score is at or
    # below the student's score.
    academics_order = []
    if sat_math > 200:
        academics_order.append(f"SUM(adm.sat_math_25 <= {sat_math})")
    if sat_reading > 200:
        academics_order.append(f"SUM(adm.sat_rw_25 <= {sat_reading})")
    if act > 1:
        academics_order.append(f"SUM(adm.act_25 <= {act})")
    if majors:
        # NOTE(review): if majors are selected but no test scores given,
        # academics_order stays empty and maj.majors_count is silently
        # dropped by the guard below — confirm this is intended.
        academics_str = "(maj.majors_count + (" + " + ".join(academics_order) + \
            f") * {academics_mult}) as academics_score"
    else:
        academics_str = "((" + " + ".join(academics_order) + \
            f") * {academics_mult}) as academics_score"
    if academics_order:
        select_str = f"{select_str}, {academics_str}"
    else:
        select_str = f"{select_str}, 0 as academics_score"
    # Financial
    finance_order = []
    if tuition:
        if financial_aid:
            finance_order.append(
                f"SUM(fin.percent_given_aid >= 50) + SUM(fin.average_price_after_aid <= {tuition})")
        else:
            # In-state price only when the student strongly prefers staying
            # in-state (state_location == 1).
            price_state = "in_state_price" if state_location == 1 else "out_of_state_price"
            finance_order.append(f"SUM(fin.{price_state} <= {tuition})")
    finance_str = "((" + " + ".join(finance_order) + \
        f") * {finance_mult}) as finance_score"
    if finance_order:
        select_str = f"{select_str}, {finance_str}"
    else:
        select_str = f"{select_str}, 0 as finance_score"
    # Other preferences (each flag only counts when explicitly provided)
    other_order = []
    if religious is not None:
        if int(religious):
            other_order.append("SUM(si.religious = 1)")
        else:
            other_order.append("SUM(si.religious = 0)")
    if ap_credit is not None and int(ap_credit):
        other_order.append("SUM(si.accepts_ap_credit = 1)")
    if study_abroad is not None and int(study_abroad):
        other_order.append("SUM(si.study_abroad = 1)")
    if offers_rotc is not None and int(offers_rotc):
        other_order.append("SUM(si.offers_rotc = 1)")
    if ncaa is not None and int(ncaa):
        other_order.append("SUM(si.ncaa_member = 1)")
    other_str = "((" + " + ".join(other_order) + \
        f") * {other_mult}) as other_score"
    if other_order:
        select_str = f"{select_str}, {other_str}"
    else:
        select_str = f"{select_str}, 0 as other_score"
    # Order categories by user priority, highest weight first. A stable
    # sort handles tied weights, which would have collided (lost entries /
    # KeyError) in the previous weight-keyed dict implementation.
    weighted_scores = [
        (location_mult, "location_score DESC"),
        (academics_mult, "academics_score DESC"),
        (finance_mult, "finance_score DESC"),
        (other_mult, "other_score DESC"),
    ]
    order = [score_col for _, score_col in
             sorted(weighted_scores, key=lambda pair: pair[0], reverse=True)]
    order_str = "ORDER BY " + ", ".join(order) + ", u.name ASC"
    limit_str = "LIMIT 21"
    return (f"{select_str}\n{from_str}\n{where_str}\n{group_str}\n{order_str}\n{limit_str};", order)
def find_major(majors_list):
    """Tally majors across all quiz-answer sub-lists and return the most
    frequent major type together with a query for majors of that type.
    (second advanced function)
    """
    # NOTE: this does not seem too advanced, so maybe we can parse each
    # major / type to describe what it is
    tally = {}
    for answer_group in majors_list:
        for major in answer_group:
            tally[major] = tally.get(major, 0) + 1
    # max() returns the first key with the highest count.
    major_type = max(tally, key=tally.get)
    # query for majors based on type
    query = f"SELECT major FROM majors_info WHERE type='{major_type}'"
    return major_type, query
def get_major_type_info(major):
    """Scrape the IUPUI 'connect majors to careers' page for *major* and
    return ``(sub_title, description)``.

    Performs one HTTP GET per call; fragile against page layout changes.
    (Indentation reconstructed from a whitespace-stripped dump.)
    """
    URL = "https://acd.iupui.edu/explore/choose-your-major/connect-majors-to-careers/interests/" + \
        major.lower() + "/index.html"
    html_text = requests.get(URL).text
    soup = BeautifulSoup(html_text, 'html.parser')
    # str() of the tag yields '<h3>...</h3>'; [4:-5] strips the tags.
    sub_title = str(soup.find('h3'))[4:-5]
    # find_all returns a list; str() flattens it so the description can be
    # carved out with plain string splits below.
    desc = str(soup.find_all('div', class_="text", string=re.compile("These")))
    # Drop everything up to (and including) the first sentence boundary...
    desc = desc.split('. ', 1)[1]
    # ...and truncate at the ' If not,' marker.
    desc = desc.split(' If not,')[0]
    return sub_title, desc
def get_major_info(major_name):
    """Scrape mymajors.com for *major_name*.

    Returns ``(description, classes, jobs, salaries)`` on success, or
    ``False`` when the site serves its generic fallback description
    (meaning the major was not found). Performs two HTTP GETs per call.
    """
    major_path = "https://www.mymajors.com/college-majors/"
    # Slugify: keep letters only, join lowercase words with '-'.
    # Fixed character class: the original '[^a-zA-z]' (lowercase z) also
    # matched the characters between 'Z' and 'a' ( [ \ ] ^ _ ` ).
    name = re.sub("[^a-zA-Z]+", " ", major_name)
    major = "-".join(name.lower().split())
    url = major_path + major
    page = requests.get(url).text
    # mymajors.com serves this canned text for unknown majors.
    default_text = "A general program that focuses on law and legal issues from the perspective of the social sciences and humanities."
    soup = BeautifulSoup(page, "html.parser")
    desc = soup.find("p", {"class": "lead"}).get_text()
    desc_text = desc.split('\n')[1].strip()
    if desc_text == default_text:
        return False
    class_list = soup.find("ul", {"class": "cols3"})
    classes = []
    # Fixed: the original tested `type(class_list) is None`, which is never
    # true, so a missing list crashed on .get_text() in the else branch.
    # Now consistent with the job_list check below.
    if class_list is None:
        classes = ["No Data Found"]
    else:
        class_list = class_list.get_text().strip().split('\n')
        classes = [cl.strip() for cl in class_list if cl.strip()]
    career_url = "https://www.mymajors.com/careers/" + major + "-major"
    careers_page = requests.get(career_url).text
    soup = BeautifulSoup(careers_page, "html.parser")
    job_list = soup.find("ul", {"class": "cols2"})
    jobs = []
    if job_list is None:
        jobs = ["No Data Found"]
    else:
        job_list = job_list.get_text().strip().split('\n')
        jobs = [job.strip() for job in job_list if job.strip()]
    # Salary table: alternating label/value <td> cells on the careers page.
    salary_data = soup.find_all("td")
    salaries = {}
    for i in range(0, len(salary_data), 2):
        if (i + 1) < len(salary_data):
            label = salary_data[i].get_text().strip()
            salary = salary_data[i+1].get_text().strip()
            salaries[label] = salary
    if not len(salaries):
        salaries["Data"] = "Not Found"
    return desc_text, classes, jobs, salaries
| michaelpri10/collegecalculator | query_schools.py | query_schools.py | py | 10,807 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_numbe... |
34222275021 | from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import login as auth_login, logout as auth_logout
from django.views.decorators.http import require_http_methods
from .forms import CustomUserCreationForm
@require_http_methods(['GET', 'POST'])
def login(request):
    """Log a user in with Django's AuthenticationForm.

    On success, redirects to the ``next`` query parameter if present,
    otherwise to the board index. On GET (or invalid POST), renders the
    login form.
    """
    if request.method == 'POST':
        # AuthenticationForm takes the request as its first positional
        # argument, unlike most other form classes.
        form = AuthenticationForm(request, request.POST)
        if form.is_valid():
            user = form.get_user()
            auth_login(request, user)
            # Renamed from `next` to avoid shadowing the builtin.
            # NOTE(review): the redirect target comes from the query string
            # unvalidated — open-redirect risk; consider
            # django.utils.http.url_has_allowed_host_and_scheme().
            next_url = request.GET.get('next')
            return redirect(next_url or 'board:board_index')
    else:
        form = AuthenticationForm()
    context = {
        'form': form,
    }
    return render(request, 'accounts/login.html', context)
def logout(request):
    """Log the current user out and return to the board index."""
    auth_logout(request)
    return redirect('board:board_index')
@require_http_methods(['GET', 'POST'])
def signup(request):
    """Register a new account.

    On a valid POST the new user is created, logged in immediately, and
    redirected to the board index; otherwise the signup form (blank on
    GET, bound with errors on invalid POST) is rendered.
    """
    if request.method != 'POST':
        blank_form = CustomUserCreationForm()
        return render(request, 'accounts/signup.html', {'form': blank_form})
    bound_form = CustomUserCreationForm(request.POST)
    if not bound_form.is_valid():
        return render(request, 'accounts/signup.html', {'form': bound_form})
    new_user = bound_form.save()
    auth_login(request, new_user)
    return redirect('board:board_index')
| kimhyunso/exampleCode | django/ONE_TO_MANY/accounts/views.py | views.py | py | 1,375 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.forms.AuthenticationForm",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 16,
"usage_type": "call... |
39332540750 | from django.shortcuts import render
from abb.models import visitors
from abb.forms import visitorform
# Create your views here.
def show_data(request):
    """Render the visitor feedback form.

    On a valid POST, persist a ``visitors`` record and re-render with a
    fresh blank form; on an invalid POST, re-render with the bound form so
    errors are shown.
    """
    form = visitorform()
    if request.method == 'POST':
        submitted = visitorform(request.POST)
        if submitted.is_valid():
            cleaned = submitted.cleaned_data
            visitors(
                name=cleaned['name'],
                mobile_no=cleaned['mobile_no'],
                feedback=cleaned['feedback'],
            ).save()
        else:
            form = submitted
    return render(request, 'abb/home.html', {'form': form})
{
"api_name": "abb.forms.visitorform",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "abb.forms.visitorform",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "abb.models.visitors",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "abb.fo... |
17849902067 | from scripts.src.common.config import Color, Keywords, Direct, DataType
from common_and_plotting_functions.functions import check_and_mkdir_of_direct
from scripts.src.common.plotting_functions import multi_row_col_bar_plot
from scripts.data.common_functions import common_data_loader
from scripts.model.model_loader import model_loader, ModelList
from scripts.src.core.model.model_constructor import common_model_constructor
from ...common.result_processing_functions import common_flux_comparison_func
# Load the renal-carcinoma infusion dataset with natural-abundance
# correction; `keyword` exposes the dataset-specific key names
# (tissue/patient/index/...).
data_wrap_obj, keyword = common_data_loader(DataType.renal_carcinoma, test_mode=False, natural_anti_correction=True)
# user_defined_model = model_loader(ModelList.invivo_infusion_model)
user_defined_model = model_loader(ModelList.base_model_with_glc_tca_buffer)
# Construct the MFA model object from the chosen network definition.
mfa_model_obj = common_model_constructor(user_defined_model)
name = 'renal_carcinoma_invivo_infusion'
# output_folder = '{}/{}'.format(Direct.output_direct, name)
# raw_data_folder = '{}/{}'.format(output_folder, Direct.raw_flux_analysis_direct)
class SpecificParameter(object):
    # NOTE(review): indentation was lost in this dump; the class is assumed
    # to contain exactly the four test-configuration attributes below —
    # confirm against the repository.

    # Fluxes excluded from dynamic fitting in test mode (none here).
    test_dynamic_constant_flux_list = []
    # Fluxes pinned to fixed values during test runs; commented entries
    # record alternative constraints that were tried.
    test_preset_constant_flux_value_dict = {
        # 'GLC_total_input': 100,
        'GLC_supplement_net': 0,
        'CIT_supplement_net': 0,
        # 'GLN_input': 80,
        'ASP_input': 50,
        # 'SER_input': 8,
        # 'ALA_input': 20,
        # 'OAC_supplement_net': 0,
    }
    # Per-flux (lower, upper) bounds overriding the model defaults.
    specific_flux_range_dict = {
        'Salvage_c': (1, 10),
    }
    # Minimal data selection used for quick test runs: kidney tissue,
    # patient 1, replicates 1 and 2.
    test_data_param_raw_list = [
        {
            keyword.tissue: keyword.kidney,
            '': [
                {
                    keyword.patient: 1,
                    '': [
                        {
                            keyword.index: 1,
                        },
                        {
                            keyword.index: 2,
                        },
                    ]
                }]
        }
    ]
# Parameter grid for the full analysis: every tissue (kidney, carcinoma,
# brain) x patients 1-5, using only the averaged replicate for each
# patient. The previous literal repeated the same patient dict 15 times;
# this comprehension is equivalent. Individual replicate indices (1, 2, 3)
# were listed commented-out before — to analyze single replicates, add
# dicts like ``{keyword.index: 1}`` to the inner index list.
complete_data_param_raw_list = [
    {
        keyword.tissue: tissue_name,
        '': [
            {
                keyword.patient: patient_id,
                '': [{keyword.index: Keywords.average}],
            }
            for patient_id in range(1, 6)
        ],
    }
    for tissue_name in (keyword.kidney, keyword.carcinoma, keyword.brain)
]
# Fluxes shown in the comparison plots. A (forward, reverse) tuple means
# the net flux (forward minus reverse) is plotted as one entry.
important_flux_list = [
    # 'GLC_unlabelled_input',
    ('FBA_c', 'FBA_c__R'),
    'PC_m',
    'G6PDH2R_PGL_GND_c',
    'PDH_m',
    'CS_m',
    'AKGD_m',
    'ICDH_m',
    'PYK_c',
    ('SUCD_m', 'SUCD_m__R'),
    ('FUMH_m', 'FUMH_m__R'),
    ('CIT_trans__R', 'CIT_trans'),
    ('MDH_c', 'MDH_c__R'),
    ('MDH_m', 'MDH_m__R'),
    ('GPT_c', 'GPT_c__R'),
    'PHGDH_PSAT_PSP_c',
    ('LDH_c', 'LDH_c__R'),
    'Salvage_c',
    'ACITL_c',
    'PEPCK_c',
    ('GAPD_c', 'GAPD_c__R'),
    'GLC_input',
    'GLN_input',
    ('SHMT_c', 'SHMT_c__R'),
    ('AKGMAL_m__R', 'AKGMAL_m')
]
# Metabolite MID panels arranged as a 3x4 grid for plotting; '+' joins
# metabolites whose pools are measured together.
mid_name_list = [
    ['3PG_c', 'SER_c', 'PYR_c+PYR_m', 'LAC_c'],
    ['ALA_c', 'CIT_c+CIT_m', 'SUC_m', 'FUM_m'],
    ['MAL_c+MAL_m', 'GLU_c+GLU_m', 'GLN_c+GLN_m', 'ASP_c+ASP_m']
]
def data_param_list_generator(param_raw_list):
    """Flatten the nested tissue -> patient -> index parameter tree into a
    flat list of per-run parameter dicts (one per tissue/patient/index
    combination), each with a None objective threshold.
    """
    flattened = []
    for tissue_entry in param_raw_list:
        tissue_key = tissue_entry[keyword.tissue]
        for patient_entry in tissue_entry['']:
            patient_key = patient_entry[keyword.patient]
            for index_entry in patient_entry['']:
                flattened.append({
                    keyword.tissue: tissue_key,
                    keyword.patient: patient_key,
                    keyword.index: index_entry[keyword.index],
                    Keywords.obj_threshold_key: None,
                })
    return flattened
# Use the full (averaged) parameter set for this analysis run.
data_param_raw_list = complete_data_param_raw_list
# Flattened (tissue, patient, index) parameter dicts consumed below.
total_param_list = data_param_list_generator(data_param_raw_list)
def collect_results(final_data_obj):
    """Register every configured (tissue, patient, index) run on
    *final_data_obj* and return project_name -> (tissue, patient, index).

    Also strips the objective-threshold entry from each project's
    information dict so it is not carried into downstream processing.
    """
    mapping = {}
    for param_dict in total_param_list:
        run_keys = (
            param_dict[keyword.tissue],
            param_dict[keyword.patient],
            param_dict[keyword.index],
        )
        project_name = data_wrap_obj.project_name_generator(*run_keys)
        mapping[project_name] = run_keys
        final_data_obj.load_current_result_label(project_name)
        # pop() with a default replaces the original membership-test + del.
        final_data_obj.final_information_dict[project_name].pop(
            Keywords.obj_threshold_key, None)
    return mapping
def experimental_results_comparison_parameter_generator(target_metabolite_data_dict):
    """Reshape labeling -> type -> index -> metabolite data objects into
    per-labeling plotting dicts.

    Returns ``(output_folder, data_dict, color_dict)`` where data_dict maps
    labeling -> metabolite -> '{type}_{index}' -> data vector, and
    color_dict maps labeling -> '{type}_{index}' -> color (type 1 = blue,
    type 2 = orange; any other type raises ValueError).
    (Indentation reconstructed from a whitespace-stripped dump.)
    """
    final_color_dict = {}
    final_data_dict_for_plotting = {}
    for labeling_key, each_labeling_data_dict in target_metabolite_data_dict.items():
        data_dict_for_plotting = {}
        color_dict = {}
        for type_key, each_type_data_dict in each_labeling_data_dict.items():
            for index_key, each_index_data_dict in each_type_data_dict.items():
                current_type_index_name = '{}_{}'.format(type_key, index_key)
                # Assign a color only the first time this type/index appears.
                if current_type_index_name not in color_dict:
                    if type_key == 1:
                        current_color = Color.blue
                    elif type_key == 2:
                        current_color = Color.orange
                    else:
                        raise ValueError()
                    color_dict[current_type_index_name] = current_color
                for metabolite_name, each_metabolite_data_obj in each_index_data_dict.items():
                    if metabolite_name not in data_dict_for_plotting:
                        data_dict_for_plotting[metabolite_name] = {}
                    data_dict_for_plotting[metabolite_name][current_type_index_name] = \
                        each_metabolite_data_obj.data_vector
        final_data_dict_for_plotting[labeling_key] = data_dict_for_plotting
        final_color_dict[labeling_key] = color_dict
    return keyword.output_folder, final_data_dict_for_plotting, final_color_dict
def flux_comparison_parameter_generator(final_solution_data_dict, final_flux_name_index_dict):
    """Build per-comparison flux plotting data for the two tissue
    comparisons ('kidney_tumor_vs_brain' and 'tumor_vs_kidney').

    Returns (data-for-plotting, key-name parameters, colors), each keyed
    by comparison name. The first tissue in each comparison is colored
    blue, the second orange.
    (Indentation reconstructed from a whitespace-stripped dump.)
    """
    from ...common.config import index_calculation_func_dict
    current_index_name_func_dict = index_calculation_func_dict
    final_dict_for_flux_comparison_plotting = {}
    final_key_name_parameter_dict = {}
    final_color_dict = {}
    comparison_name_tissue_dict = {
        'kidney_tumor_vs_brain': [keyword.carcinoma, keyword.brain],
        'tumor_vs_kidney': [keyword.kidney, keyword.carcinoma]}
    current_important_flux_list = list(important_flux_list)
    # Derived flux indices (name -> calculation function) are plotted too.
    if current_index_name_func_dict is not None:
        current_important_flux_list.extend(current_index_name_func_dict.items())
    # NOTE: iteration order matters — 'kidney_tumor_vs_brain' runs first
    # and fills patient_key_set, which 'tumor_vs_kidney' then relies on
    # (dicts preserve insertion order).
    patient_key_set = set()
    for comparison_name, current_tissue_name_list in comparison_name_tissue_dict.items():
        data_dict_for_plotting = {}
        key_name_parameter_dict = {}
        color_dict = {}
        data_dict_order_list = []
        if comparison_name == 'kidney_tumor_vs_brain':
            # Group by tissue first: all patients of one tissue, then the next.
            for tissue_index, tissue_name in enumerate(current_tissue_name_list):
                for patient_key, current_patient_dict in final_solution_data_dict[tissue_name].items():
                    if patient_key not in patient_key_set:
                        patient_key_set.add(patient_key)
                    current_flux_name_index_dict = final_flux_name_index_dict[tissue_name][patient_key]
                    data_dict_order_list.append(
                        (tissue_index, tissue_name, patient_key, current_patient_dict, current_flux_name_index_dict))
        elif comparison_name == 'tumor_vs_kidney':
            # Group by patient first: both tissues for each patient in turn.
            for patient_key in patient_key_set:
                for tissue_index, tissue_name in enumerate(current_tissue_name_list):
                    current_patient_dict = final_solution_data_dict[tissue_name][patient_key]
                    current_flux_name_index_dict = final_flux_name_index_dict[tissue_name][patient_key]
                    data_dict_order_list.append(
                        (tissue_index, tissue_name, patient_key, current_patient_dict, current_flux_name_index_dict))
        else:
            raise ValueError()
        for tissue_index, tissue_name, patient_key, current_patient_dict, current_flux_name_index_dict in data_dict_order_list:
            for index_num, current_data_array in current_patient_dict.items():
                key_name = '{}_{}_{}'.format(tissue_name, patient_key, index_num)
                key_name_parameter_dict[key_name] = (tissue_name, patient_key, index_num)
                if tissue_index == 0:
                    color_dict[key_name] = Color.blue
                else:
                    color_dict[key_name] = Color.orange
                common_flux_comparison_func(
                    current_important_flux_list, current_flux_name_index_dict[index_num], current_data_array,
                    data_dict_for_plotting, key_name)
        final_dict_for_flux_comparison_plotting[comparison_name] = data_dict_for_plotting
        final_key_name_parameter_dict[comparison_name] = key_name_parameter_dict
        final_color_dict[comparison_name] = color_dict
    return final_dict_for_flux_comparison_plotting, final_key_name_parameter_dict, final_color_dict
def experimental_data_plotting(
        complete_experimental_mid_data_obj_dict, complete_result_information_dict, output_direct):
    """Plot MID and raw-intensity bar charts per tissue.

    For every non-averaged result, the per-metabolite data vectors are
    grouped by tissue and '{patient}_{index}' label, then rendered as a
    3x4 grid of bar plots — once for corrected MIDs (y in [0, 1]) and
    once for raw data — under *output_direct*/mid_data and /raw_data.
    (Indentation reconstructed from a whitespace-stripped dump.)
    """
    mid_data_dict_for_plotting = {}
    raw_data_dict_for_plotting = {}
    color_dict = {}
    # One color per patient, cycling through blue/orange/purple.
    patient_color_dict = {}
    complete_color_list = [Color.blue, Color.orange, Color.purple]
    for result_label, experimental_mid_data_obj_dict in complete_experimental_mid_data_obj_dict.items():
        result_information_dict = complete_result_information_dict[result_label]
        tissue = result_information_dict[keyword.tissue]
        patient = result_information_dict[keyword.patient]
        index = result_information_dict[keyword.index]
        # Averaged replicates are skipped; only individual replicates are plotted.
        if index == Keywords.average:
            continue
        if tissue not in mid_data_dict_for_plotting:
            mid_data_dict_for_plotting[tissue] = {}
            raw_data_dict_for_plotting[tissue] = {}
        for metabolite_name, mid_data_obj in experimental_mid_data_obj_dict.items():
            current_mid_label_tissue_dict = mid_data_dict_for_plotting[tissue]
            current_raw_label_tissue_dict = raw_data_dict_for_plotting[tissue]
            if metabolite_name not in current_mid_label_tissue_dict:
                current_mid_label_tissue_dict[metabolite_name] = {}
                current_raw_label_tissue_dict[metabolite_name] = {}
            patient_index_str = f'{patient}_{index}'
            current_mid_label_tissue_dict[metabolite_name][patient_index_str] = mid_data_obj.data_vector
            current_raw_label_tissue_dict[metabolite_name][patient_index_str] = mid_data_obj.raw_data_vector
            if patient_index_str not in color_dict:
                if patient not in patient_color_dict:
                    current_color = complete_color_list[len(patient_color_dict) % 3]
                    patient_color_dict[patient] = current_color
                color_dict[patient_index_str] = patient_color_dict[patient]
    # Panel layout: 3 rows x 4 columns of metabolites.
    target_emu_name_nested_list = [
        ['glucose', '3-phosphoglycerate', 'pyruvate', 'lactate'],
        ['alanine', 'citrate', 'succinate', 'fumarate'],
        ['malate', 'aspartate', 'glutamate', 'glutamine'],
    ]
    target_row_num = len(target_emu_name_nested_list)
    target_col_num = len(target_emu_name_nested_list[0])
    for tissue, each_tissue_mid_data_dict_for_plotting in mid_data_dict_for_plotting.items():
        each_tissue_raw_data_dict_for_plotting = raw_data_dict_for_plotting[tissue]
        # Render each tissue twice: raw intensities and corrected MIDs.
        for raw_data in (False, True):
            if raw_data:
                parent_direct = 'raw_data'
                complete_data_dict = each_tissue_raw_data_dict_for_plotting
                ylim = (0, None)
            else:
                parent_direct = 'mid_data'
                complete_data_dict = each_tissue_mid_data_dict_for_plotting
                ylim = (0, 1)
            current_title = f'{tissue}'
            current_output_direct = '{}/{}'.format(output_direct, parent_direct)
            check_and_mkdir_of_direct(current_output_direct)
            multi_row_col_bar_plot(
                complete_data_dict, target_emu_name_nested_list, target_row_num, target_col_num,
                error_bar_data_dict=None, color_dict=color_dict, title_dict=None,
                output_direct=current_output_direct, current_title=current_title, ylim=ylim,
                xlabel_list=None, figsize=None, legend=False)
def result_output_dataframe_dict_generator(complete_result_dict):
    """Placeholder: spreadsheet export is not implemented for this dataset.

    Expected to return a mapping of sheet name -> pandas.DataFrame when
    implemented (see the commented example below).
    """
    pass
    # return {'Sheet1': pd.DataFrame()}
def metabolic_network_parameter_generator():
    """Return the parameter bundle describing how this dataset maps onto
    the metabolic network figure.

    Returns a 7-tuple:
    (measured MID metabolites, measured mixed-compartment MID metabolites,
     biomass precursor metabolites, medium input metabolites,
     13C-labeled input metabolites, boundary fluxes, infusion flag).
    """
    measured_mid_metabolites = {
        '3PG_c',
        'PYR_c', 'PYR_m',
        'LAC_c',
        'ALA_c',
        'SER_c',
        'SUC_m',
        'FUM_m',
        'MAL_c', 'MAL_m',
        'CIT_c', 'CIT_m',
        'ASP_c', 'ASP_m',
        'GLU_c', 'GLU_m',
        'GLN_c', 'GLN_m',
    }
    # Metabolites whose measurement mixes cytosolic and mitochondrial pools.
    mixed_compartment_mids = {
        'PYR_c', 'PYR_m',
        'MAL_c', 'MAL_m',
        'ASP_c', 'ASP_m',
        'GLU_c', 'GLU_m',
        'GLN_c', 'GLN_m',
        'CIT_c', 'CIT_m',
    }
    biomass_precursors = {
        'ALA_c', 'RIB5P_c', 'GLY_c', 'SER_c', 'ASP_c',
        'ACCOA_c', 'GLU_c', 'GLN_c',
    }
    medium_inputs = {
        'GLC_e', 'GLC_unlabelled_e', 'GLN_e', 'ASP_e', 'SER_e', 'GLY_e', 'ALA_e', 'LAC_e',
    }
    # Only glucose carries the 13C label in this experiment.
    labeled_inputs = {'GLC_e'}
    boundary_fluxes = {'GLC_input', 'GLC_unlabelled_input'}
    infusion = False
    return (
        measured_mid_metabolites, mixed_compartment_mids, biomass_precursors,
        medium_inputs, labeled_inputs, boundary_fluxes, infusion,
    )
| LocasaleLab/Automated-MFA-2023 | scripts/src/experimental_data_analysis/specific_data_model_combination/renal_carcinoma_invivo_infusion.py | renal_carcinoma_invivo_infusion.py | py | 21,216 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scripts.data.common_functions.common_data_loader",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "scripts.src.common.config.DataType.renal_carcinoma",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "scripts.src.common.config.DataType",
... |
17096720993 | # #############################################################################
# 本題參數設定,請勿更改
seed = 0 # 亂數種子數
# #############################################################################
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
evaluation = pd.DataFrame({'Model': [],
'Details':[],
'RMSE (test)':[],
'R2 (train)':[],
'adj. R2 (train)':[],
'R2 (test)':[],
'adj. R2 (test)':[]}
)
# 讀取台北市房價資料集
df = pd.read_csv('Taipei_house.csv')
# 對"行政區"進行 one-hot encoding
df = pd.get_dummies(df, columns=['行政區'])
# 處理"車位類別"
df['車位類別'] = [0 if x=='無' else 1 for x in df['車位類別']]
# 計算 Adjusted R-squared
def adj_R2(r2, n, k):
    """Compute the adjusted R-squared used in this exercise.

    Args:
        r2: plain R-squared value.
        n: number of samples.
        k: number of features.
    Returns:
        r2 penalized by (k-1)/(n-k) of the unexplained variance.
    """
    penalty = (k - 1) / (n - k)
    return r2 - penalty * (1 - r2)
from sklearn.metrics import mean_squared_error
def measurement(model, X_train, X_test):
    """Score *model* against the module-level y_train/y_test targets.

    Returns [test RMSE, train R2, train adj. R2, test R2, test adj. R2],
    matching the `evaluation` DataFrame columns.
    """
    predictions = model.predict(X_test)
    rmse = round(np.sqrt(mean_squared_error(y_test, predictions)), 0)
    train_r2 = model.score(X_train, y_train)
    test_r2 = model.score(X_test, y_test)
    return [
        rmse,
        round(train_r2, 4),
        round(adj_R2(train_r2, X_train.shape[0], X_train.shape[1]), 4),
        round(test_r2, 4),
        round(adj_R2(test_r2, X_test.shape[0], X_test.shape[1]), 4),
    ]
# Split: 80% train / 20% test
features = ['土地面積', '建物總面積', '屋齡', '樓層', '總樓層', '用途',
            '房數', '廳數', '衛數', '電梯', '車位類別',
            '行政區_信義區', '行政區_大安區', '行政區_文山區', '行政區_松山區']
target = '總價'
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df[features],
                                                    df[target],
                                                    test_size=0.2,
                                                    random_state=seed)
lst_model, lst_info = [], []
# Multiple linear regression (all parameters default)
# #########################################################################
# The four district features are produced by one-hot encoding; rename the
# column names here if your encoded names differ.
# #########################################################################
from sklearn import linear_model
lst_model.append(linear_model.LinearRegression())
lst_info.append(['複迴歸', '15 features'])
# Ridge regression; all parameters default except:
# #########################################################################
# alpha=10
# #########################################################################
lst_model.append(linear_model.Ridge(alpha=10))
lst_info.append(['Ridge', '15 features'])
# Polynomial regression; all parameters default except:
# #########################################################################
# degree=2
# #########################################################################
from sklearn.preprocessing import PolynomialFeatures
poly_fea = PolynomialFeatures(degree=2)
X_train_poly = poly_fea.fit_transform(X_train)
X_test_poly = poly_fea.fit_transform(X_test)
lst_model.append(linear_model.LinearRegression())
lst_info.append(['多項式迴歸', 'deg=2'])
# Polynomial regression + Lasso; all parameters default except:
# #########################################################################
# alpha=10
# #########################################################################
lst_model.append(linear_model.Lasso(alpha=10))
lst_info.append(['多項式迴歸+Lasso', 'deg=2'])
idx = evaluation.shape[0]
# Fit every model and append its metrics to `evaluation`. Note that
# X_train/X_test are rebound to the degree-2 expanded matrices once the
# first '多項式' (polynomial) entry is reached and stay expanded afterward.
for i in range(len(lst_model)):
    if '多項式' in lst_info[i][0]:
        X_train, X_test = X_train_poly, X_test_poly
    model = lst_model[i].fit(X_train, y_train)
    row = lst_info[i] + measurement(model, X_train, X_test)
    evaluation.loc[idx + i] = row
# Report the headline metrics (print strings kept exactly as graded).
print('對訓練集的最大 Adjusted R-squared: %.4f' % max(evaluation['adj. R2 (train)']))
print('對測試集的最小 RMSE:%d' % min(evaluation['RMSE (test)']))
# Rows 0-1 are the two plain linear models.
print('兩個模型對測試集的最大 Adjusted R-squared: %.4f' %
      max(evaluation.loc[:1, 'adj. R2 (test)']))
''' 預測 '''
# Refit on ALL data with the best model, then predict one new listing.
X = df[features]
y = df[target]
X_poly = poly_fea.fit_transform(X)
# New listing; value order matches `features`:
# [土地面積, 建物總面積, 屋齡, 樓層, 總樓層, 用途, 房數, 廳數, 衛數, 電梯,
#  車位類別, 行政區_信義區, 行政區_大安區, 行政區_文山區, 行政區_松山區]
new = np.array([36, 99, 32, 4, 4, 0, 3, 2, 1, 0, 0, 0, 0, 0, 1]).reshape(1, -1)
df_new = pd.DataFrame(new, columns=features)
df_poly_fea = poly_fea.fit_transform(df_new)
# Pick the model with the best test adjusted R^2; indices 0-1 use raw
# features, indices 2-3 use the polynomial expansion.
lst = evaluation['adj. R2 (test)'].tolist()
idx = lst.index(max(lst))
if idx <= 1:
    model = lst_model[idx].fit(X, y)
    print('房價預測結果:%d' % model.predict(df_new))
else:
    model = lst_model[idx].fit(X_poly, y)
    print('房價預測結果:%d' % model.predict(df_poly_fea))
| neochen2701/TQCPans | 機器學習Python 3答案檔/MLA305.py | MLA305.py | py | 5,623 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.get_du... |
16908305317 | # import package
import os, random, tweepy, time, schedule
def job():
    """Pick a random media file from the pictures folder and tweet it.

    Twitter accepts .jpg/.jpeg/.png/.gif images and .mp4 video as media;
    keep only those formats in the folder or media_upload will fail.
    Limits as of 18/07/2022: images 5MB, GIFs 15MB, MP4 512MB (with
    media_category=amplify).
    """
    folder = r"F:\mat\twitterbot\pics"  # set your media folder here
    chosen = random.choice(os.listdir(folder))
    print(chosen)
    # Removed: unused `from PIL import Image` and a commented-out os.open —
    # only the file path is needed; tweepy reads the file itself.
    media_path = folder + '\\' + chosen
    print(media_path)
    print("Media successfully picked")
    upload(media_path)
# Twitter/X API credentials -- fill these in before running.
CONSUMER_KEY = ""
CONSUMER_SEC = ""
AUTH_ACC = ""
AUTH_SEC = ""
BEARER = ""
# v1.1 API client (used for media upload)
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SEC)
auth.set_access_token(AUTH_ACC, AUTH_SEC)
api = tweepy.API(auth, wait_on_rate_limit=True)
# v2 API client (used to create the tweet)
client = tweepy.Client(BEARER, CONSUMER_KEY, CONSUMER_SEC, AUTH_ACC, AUTH_SEC, wait_on_rate_limit=True)
# Sanity-check the v1.1 credentials at startup.
try:
    api.verify_credentials()
    print("V1.1 Authentication OK")
except Exception as e:
    print(f"Error during authentication: {e}")
# upload media
def upload(filename):
    """Upload *filename* via the v1.1 media endpoint, then publish a tweet
    referencing it through the v2 client. Returns 0.
    """
    status_text = ""  # put your tweet text here, or leave blank for none
    uploaded_media = api.media_upload(filename)
    client.create_tweet(text=status_text, media_ids=[uploaded_media.media_id])
    print("Uploaded successfully")
    return 0
# Run job() once per day at this hour; see
# https://schedule.readthedocs.io/en/stable/ for scheduling options.
schedule.every().day.at("13:00").do(job)
while True:
    schedule.run_pending()
    time.sleep(1)  # poll the scheduler once per second

# Credits for help:
# STC, Merceal, Asplosions in Offline Chat
# Supe from the Tweepy Discord server
| mdsmendes94/twitterdailybot | ttbot.pyw | ttbot.pyw | pyw | 1,988 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "random.choice",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tweepy.OAuthHandler",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_num... |
41473256040 | from PyQt5.QtWidgets import QLabel, QApplication, QDialog, QGridLayout, QHBoxLayout, QPushButton, QFormLayout, \
QWidget, \
QLineEdit
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon
class calculator_frame(QDialog):
    def __init__(self):
        """Build the calculator dialog and reset its accumulator state."""
        super().__init__()
        # Widgets are created first so init() can style and lay them out.
        self.shower = QLineEdit()  # "memory:" display row (see init())
        self.label = QLabel()      # "result=" display row (see init())
        self.init()
        # Calculator state -- NOTE(review): exact roles inferred; the
        # class body is truncated in this dump, confirm against full source.
        self.tmp_string_num = ""
        self.tmp_num = 0
        self.sum_num = ""
        self.num = 0
def init(self):
self.setWindowTitle("个人计算器")
self.setWindowIcon(QIcon('calculator.ico'))
self.label.setStyleSheet('font-size:20px;color:rgb(180,180,180,255);')
self.shower.setStyleSheet('font-size:20px;color:rgb(180,180,180,255);')
self.shower.setEnabled(False)
hbox = QHBoxLayout()
form_widget = QWidget()
grid_widget = QWidget()
form_layout = QFormLayout()
form_layout.addRow("memory:", self.shower)
form_layout.addRow("result=", self.label)
form_widget.setLayout(form_layout)
# self.setFixedSize(300,200)
grid_layout = QGridLayout()
one = QPushButton("1")
two = QPushButton("2")
three = QPushButton("3")
four = QPushButton("4")
five = QPushButton("5")
six = QPushButton("6")
seven = QPushButton("7")
eight = QPushButton("8")
nine = QPushButton("9")
zero = QPushButton("0")
point = QPushButton(".")
equal = QPushButton("=")
add = QPushButton("+")
sub = QPushButton("-")
mult = QPushButton("*")
div = QPushButton("/")
one.clicked.connect(self.clicker)
two.clicked.connect(self.clicker)
three.clicked.connect(self.clicker)
four.clicked.connect(self.clicker)
five.clicked.connect(self.clicker)
six.clicked.connect(self.clicker)
seven.clicked.connect(self.clicker)
eight.clicked.connect(self.clicker)
nine.clicked.connect(self.clicker)
zero.clicked.connect(self.clicker)
add.clicked.connect(self.clicker)
sub.clicked.connect(self.clicker)
mult.clicked.connect(self.clicker)
div.clicked.connect(self.clicker)
equal.clicked.connect(self.clicker)
point.clicked.connect(self.clicker)
one.setStyleSheet('font-size:32px;color:rgb(0,0,0,255);')
two.setStyleSheet('font-size:32px;color:rgb(0,0,0,255);')
three.setStyleSheet('font-size:32px;color:rgb(0,0,0,255);')
four.setStyleSheet('font-size:32px;color:rgb(0,0,0,255);')
five.setStyleSheet('font-size:32px;color:rgb(0,0,0,255);')
six.setStyleSheet('font-size:32px;color:rgb(0,0,0,255);')
seven.setStyleSheet('font-size:32px;color:rgb(0,0,0,255);')
eight.setStyleSheet('font-size:32px;color:rgb(0,0,0,255);')
nine.setStyleSheet('font-size:32px;color:rgb(0,0,0,255);')
zero.setStyleSheet('font-size:32px;color:rgb(0,0,0,255);')
point.setStyleSheet('font-size:32px;color:rgb(0,0,0,255);')
equal.setStyleSheet('font-size:32px;color:rgb(0,0,0,255);')
add.setStyleSheet('font-size:28px;color:rgb(0,0,0,255);')
sub.setStyleSheet('font-size:28px;color:rgb(0,0,0,255);')
mult.setStyleSheet('font-size:28px;color:rgb(0,0,0,255);')
div.setStyleSheet('font-size:28px;color:rgb(0,0,0,255);')
grid_layout.addWidget(add, 2, 4)
grid_layout.addWidget(div, 1, 4)
grid_layout.addWidget(mult, 0, 4)
grid_layout.addWidget(sub, 3, 4)
grid_layout.addWidget(zero, 3, 2)
grid_layout.addWidget(point, 3, 1)
grid_layout.addWidget(equal, 3, 3)
grid_layout.addWidget(one, 2, 1)
grid_layout.addWidget(two, 2, 2)
grid_layout.addWidget(three, 2, 3)
grid_layout.addWidget(four, 1, 1)
grid_layout.addWidget(five, 1, 2)
grid_layout.addWidget(six, 1, 3)
grid_layout.addWidget(seven, 0, 1)
grid_layout.addWidget(eight, 0, 2)
grid_layout.addWidget(nine, 0, 3)
grid_widget.setLayout(grid_layout)
hbox.addWidget(form_widget, 0, Qt.AlignLeft)
hbox.addWidget(grid_widget, 0, Qt.AlignRight)
self.setLayout(hbox)
def clicker(self):
num_or_opt = self.sender().text()
if num_or_opt.isdigit():
self.tmp_string_num += str(num_or_opt)
self.tmp_num = int(self.tmp_string_num)
self.shower.setText(self.tmp_string_num)
self.sum_num += self.tmp_string_num
print(self.sum_num)
self.label.setText(self.sum_num)
self.tmp_string_num=""
self.tmp_num = 0
else:
if not num_or_opt == "=":
self.tmp_string_num += num_or_opt
self.sum_num += num_or_opt
self.shower.setText(self.tmp_string_num)
self.tmp_string_num = ""
self.tmp_num = 0
else:
self.label.setText(str(eval(self.sum_num)))
if __name__ == '__main__':
    # Launch the calculator as a standalone application.
    application = QApplication(sys.argv)
    window = calculator_frame()
    window.show()
    sys.exit(application.exec_())
| siuwhat/calculator | calculator_frame.py | calculator_frame.py | py | 5,213 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 13,
"usage_type": "call"
},
{
"api_name"... |
19686841128 | from argparse import ArgumentParser
import tabulate
argparser = ArgumentParser()
argparser.add_argument('--device', type=int, required=True, help='id of device to run training on.')
argparser.add_argument('--seed', type=int, required=True, help='random seed to use for training.')
argparser.add_argument('--dir', type=str, help='directory for all model output, logs, checkpoints, etc.')
argparser.add_argument('--debug', action='store_true',
help='use debug mode which only uses one example to train and eval.')
argparser.add_argument('--three_d', action='store_true',
help='train a 3d model. (default: False)')
argparser.add_argument('--freeze_encoder', action='store_true',
help='train UNet with frozen encoder. (default: False).')
argparser.add_argument('--save_freq', type=int, default=1,
help='epoch frequency with which to checkpoint. (default = 1)')
args = argparser.parse_args()
import sys
import os
import random
import torch
import torchvision
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader, Subset
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from tqdm import tqdm
from dataloader import BraTS20202d, BraTS20203d
from models import UNetGenerator, UNetGenerator3d
from losses import BraTSBCEWithLogitsLoss
from sklearn.model_selection import train_test_split
from utils import *
os.makedirs(args.dir, exist_ok=True)
os.makedirs(args.dir + '/logs/', exist_ok=True)
os.makedirs(args.dir + '/checkpoints/', exist_ok=True)
# sundry
seed = args.seed
device = torch.device(f'cuda:{args.device}')
# model params
input_nc = 4
output_nc = 3
num_downs = 3
ngf = 64
# optim params
lr = 8e-3
momentum = 0.9
wd = 1e-4
# train params
epochs = 150
# fix all randomness for reproducibility
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
tr_data_dir = 'brats2020/2dunet/train-preprocessed-v2/data/'
te_data_dir = 'brats2020/2dunet/test-preprocessed-v2/data/'
if args.debug:
tr_data_dir = 'brats2020/2dunet/debug/'
te_data_dir = 'brats2020/2dunet/debug/'
num_workers =0
batch_size = 1
elif args.three_d:
tr_data_dir = 'brats2020/2dunet/train/'
te_data_dir = 'brats2020/2dunet/test/'
batch_size = 5
num_workers = 4
else:
num_workers = 4
batch_size = 150
# data setup
print(f'loading training data from: {tr_data_dir}')
print(f'loading test data from: {te_data_dir}')
if args.three_d:
train_dataset = BraTS20203d(tr_data_dir)
test_dataset = BraTS20203d(te_data_dir)
else:
train_dataset = BraTS20202d(tr_data_dir, only_tumor=True)
test_dataset = BraTS20202d(te_data_dir, only_tumor=True)
train_loader = DataLoader(train_dataset, shuffle=True,
num_workers=num_workers, batch_size=batch_size)
#collate_fn=collate_fn)
# batch_size == 1 required for the image logging to work properly
test_loader = DataLoader(test_dataset, shuffle=False,
num_workers=num_workers, batch_size=1)
# model setup
if args.three_d:
model = UNetGenerator3d(input_nc, output_nc, num_downs, ngf=ngf,
freeze_encoder=args.freeze_encoder).to(device)
loss = BraTSBCEWithLogitsLoss()
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=wd)
else:
model = UNetGenerator(input_nc, output_nc, num_downs, ngf=ngf,
freeze_encoder=args.freeze_encoder).to(device)
loss = BraTSBCEWithLogitsLoss()
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=wd)
start_epoch = 0
writer = SummaryWriter(log_dir=f'{args.dir}/logs/{start_epoch}/')
table_out_columns = ['ep', 'train loss', 'train_dice_et', 'train_dice_wt','train_dice_tc',\
'test loss', 'test_dice_et', 'test_dice_wt','test_dice_tc']
def log_epoch(stuff):
    """Write one epoch's metrics and sample images to TensorBoard.

    ``stuff`` is the per-epoch dict built in the main loop; it carries
    'epoch', 'train_loss'/'test_loss', 'train_dice'/'test_dice' (3-element
    arrays: enhancing, whole, core) and 'images' (list of (id, tensor)).
    Uses the module-level SummaryWriter ``writer``.
    """
    epoch = stuff['epoch']
    writer.add_scalar('Loss/train', stuff['train_loss'], epoch)
    writer.add_scalar('Dice/enhancing train', stuff['train_dice'][0], epoch)
    writer.add_scalar('Dice/whole train', stuff['train_dice'][1], epoch)
    writer.add_scalar('Dice/core train', stuff['train_dice'][2], epoch)
    writer.add_scalar('Loss/test', stuff['test_loss'], epoch)
    writer.add_scalar('Dice/enhancing test', stuff['test_dice'][0], epoch)
    writer.add_scalar('Dice/whole test', stuff['test_dice'][1], epoch)
    writer.add_scalar('Dice/core test', stuff['test_dice'][2], epoch)
    for img in stuff['images']:
        # One column per channel slice: split along dim 0, stack vertically.
        grid = torchvision.utils.make_grid(
            [s.squeeze() for s in torch.split(img[1].squeeze(), 1)], nrow=1, pad_value=255)
        # need a dummy 'channel' dimension for the tensorboard api
        grid = grid.unsqueeze(1)
        writer.add_images(f'Images/{img[0]}', grid, global_step=epoch)
    # TODO: Haus, NLL, images
    #writer.add_scalar('Dice/whole', np.random.random(), n_iter)
    #writer.add_scalar('Dice/enhancing', np.random.random(), n_iter)
    #writer.add_scalar('Dice/core', np.random.random(), n_iter)
def dice_coeff(pred, trgt):
    """Smoothed Dice coefficient between two (binary) tensors.

    Both tensors are flattened and compared element-wise; the smoothing
    term of 1 keeps the ratio defined when both inputs are all zeros
    (in that case the coefficient is 1).
    """
    smoothing = 1.
    # view(-1) requires contiguous storage, so normalise layouts first.
    flat_pred = (pred if pred.is_contiguous() else pred.contiguous()).view(-1)
    flat_trgt = (trgt if trgt.is_contiguous() else trgt.contiguous()).view(-1)
    overlap = (flat_pred * flat_trgt).sum()
    denom = flat_pred.sum() + flat_trgt.sum() + smoothing
    return (2 * overlap + smoothing) / denom
def compute_dice(output, target):
    """Per-channel Dice scores for the three BraTS label channels.

    Parameters
    ----------
    output : logits tensor, indexed as (N, 3, ...).
    target : ground-truth tensor with the same channel layout.

    Returns
    -------
    np.ndarray of shape (3,) with one Dice coefficient per channel.
    """
    sigs = torch.sigmoid(output)
    # Threshold at 0.5 and build the mask directly on output's device.
    # The original `preds.cuda(output.get_device())` crashed for CPU
    # tensors (get_device raises on CPU); .to() works everywhere.
    preds = (sigs > 0.5).to(device=output.device, dtype=torch.uint8)
    dice = np.zeros(3)
    for i in range(3):
        dice[i] = dice_coeff(preds[:, i].squeeze(), target[:, i].squeeze())
    return dice
# Random test-batch indices whose predictions get logged as images each epoch.
# NOTE(review): randint's upper bound is inclusive and these are compared to
# the 1-based batch counter j+1 below, so index 0 can never match — confirm.
examples_to_track = [random.randint(0, len(test_dataset)) for _ in range(10)]
for epoch in range(epochs):
    model.train()
    # Per-epoch scratch dict consumed by log_epoch() at the bottom.
    stuff = {'epoch': epoch, 'images': []}
    test_loss = train_loss = 0
    # --- training pass: optimize and keep a running-average loss ---
    for i, (src, tgt) in enumerate(tqdm(train_loader)):
        optimizer.zero_grad()
        src, tgt = src.to(device).float(), tgt.to(device).float()
        output = model(src)
        # NOTE(review): prints every batch — likely leftover debug output.
        print(f'outputsize: {output.size()}, tgtsize {tgt.size()}')
        loss_e = loss(output, tgt)
        # Incremental running mean: avg_i = x_i/(i+1) + avg_{i-1} * i/(i+1).
        train_loss = loss_e/(i+1) + (i/(i+1))*train_loss
        loss_e.backward()
        optimizer.step()
    # Logged train loss is the running value from the optimization pass;
    # the eval-mode recomputation below only feeds the checkpoint.
    stuff['train_loss'] = train_loss
    # --- eval pass over the training set: dice + eval-mode loss ---
    with torch.no_grad():
        model.eval()
        train_loss = 0
        stuff['train_dice'] = np.zeros(3)
        for j, (src, tgt) in enumerate(tqdm(train_loader)):
            src, tgt = src.to(device).float(), tgt.to(device).float()
            output = model(src)
            loss_e = loss(output, tgt)
            # update the running average dice/loss
            stuff['train_dice'] = compute_dice(output, tgt)/(j+1) + (j/(j+1))*stuff['train_dice']
            train_loss = loss_e/(j+1) + (j/(j+1))*train_loss
    # NOTE(review): assigns the stale pre-eval value (0); it is overwritten
    # with the real test loss after the loop below — this line looks redundant.
    stuff['test_loss'] = test_loss
    # --- eval pass over the test set: dice, loss, and sample images ---
    with torch.no_grad():
        model.eval()
        test_loss = 0
        stuff['test_dice'] = np.zeros(3)
        for j, (src, tgt) in enumerate(tqdm(test_loader)):
            src, tgt = src.to(device).float(), tgt.to(device).float()
            output = model(src)
            loss_e = loss(output, tgt)
            # update the running average dice/loss
            stuff['test_dice'] = compute_dice(output, tgt)/(j+1) + (j/(j+1))*stuff['test_dice']
            test_loss = loss_e/(j+1) + (j/(j+1))*test_loss
            if j+1 in examples_to_track:
                # Binarize predictions at 0.5 and scale both prediction and
                # target masks to 0/255 for image logging.
                sigs = torch.sigmoid(output)
                sigs_npy = sigs.cpu().numpy()
                preds = torch.zeros(sigs.size(), dtype=torch.uint8)
                preds[torch.where(sigs > 0.5)] = 255
                tgt_scaled = tgt*255
                if len(preds[1].shape) == 3:
                    # 3d volume: take a fixed axial slice — assumes depth > 100,
                    # TODO confirm for all inputs.
                    preds = preds[..., 100].squeeze()
                img = torch.cat([preds, tgt_scaled.cpu()], dim=1).to(torch.uint8)
                stuff['images'].append((j+1, img))
    train_stats = [ epoch + 1, stuff['train_loss']]+ stuff['train_dice'].tolist() \
            + [stuff['test_loss']] + stuff['test_dice'].tolist()
    table_train = tabulate.tabulate([train_stats], table_out_columns, tablefmt="simple", floatfmt="8.4f")
    print(table_train)
    # breaks if batch_size > 1
    stuff['test_loss'] = test_loss
    log_epoch(stuff)
    # Periodic checkpoint: model/optimizer state plus the eval-mode metrics.
    if epoch % args.save_freq == 0:
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'train_loss': train_loss,
            'test_loss': test_loss,
            'train_dice': stuff['train_dice'],
            'test_dice': stuff['test_dice']
        }, f'{args.dir}/checkpoints/epoch_{epoch:03}.pt'
        )
| ChaseDuncan/2dunet | train.py | train.py | py | 9,434 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"lin... |
11677498007 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTIBILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#################################################
# -------------------
# Blender for Science
# -------------------
# Add-on: blendmsh
# Author: Senthur Raj (Github: imsenthur)
# Description: Blendmsh is a bridge between Blender 2.80+ and Gmsh, a fast and light 3D finite element mesh generator.
# https://github.com/blender-for-science/blendmsh
#################################################
bl_info = {
"name" : "blendmsh",
"author" : "Senthur Raj",
"description" : "Blendmsh is a bridge between Blender 2.80+ and Gmsh, a fast and light 3D finite element mesh generator.",
"blender" : (2, 80, 0),
"version" : (1, 1, 0),
"location" : "View3D",
"warning" : "",
"wiki_url" : "https://github.com/blender-for-science/blendmsh",
"tracker_url" : "https://github.com/blender-for-science/blendmsh",
"category" : "Mesh"
}
import bpy
from .properties import BlendmshProperties
from .panel import BLENDMSH_PT_Panel
from .processor import BLENDMSH_OT_Meshinit, BLENDMSH_OT_Meshproc, BLENDMSH_OT_Physicalgroups
from .preferences import BlendmshPreferences, BlendmshInstaller
def register():
    """Register all blendmsh classes with Blender and attach the scene property."""
    # Preferences/UI classes first, then the scene pointer, then the
    # operators — identical order to registering each class one by one.
    for cls in (BlendmshPreferences, BlendmshInstaller,
                BlendmshProperties, BLENDMSH_PT_Panel):
        bpy.utils.register_class(cls)
    bpy.types.Scene.blendmsh = bpy.props.PointerProperty(type=BlendmshProperties)
    for cls in (BLENDMSH_OT_Meshinit, BLENDMSH_OT_Meshproc,
                BLENDMSH_OT_Physicalgroups):
        bpy.utils.register_class(cls)
def unregister():
    """Unregister every blendmsh class, in the same order as the add-on registered them."""
    all_classes = (BlendmshPreferences, BlendmshInstaller, BlendmshProperties,
                   BLENDMSH_PT_Panel, BLENDMSH_OT_Meshinit, BLENDMSH_OT_Meshproc,
                   BLENDMSH_OT_Physicalgroups)
    for cls in all_classes:
        bpy.utils.unregister_class(cls)
if __name__ == "__main__":
    # Allows running this file directly (e.g. from Blender's text editor)
    # instead of installing it as an add-on.
    register()
{
"api_name": "bpy.utils.register_class",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "preferences.BlendmshPreferences",
"line_number": 47,
"usage_type": "argument"
},
{
"api_name": "bpy.utils",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_n... |
12303872667 | from albumentations.augmentations.transforms import Normalize
import torch
import torchvision
import albumentations
import albumentations.pytorch
from adamp import AdamP
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torchsummary import summary as summary_
from dataset import CatDogDataset
from model import MyModel
# --- hyperparameters and device selection ---
NUM_EPOCHS = 3
BATCH_SIZE = 128
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# Augmentation + preprocessing pipeline.
# NOTE(review): OneOf with a single transform is equivalent to applying
# HorizontalFlip with its default probability — the wrapper adds nothing.
# NOTE(review): std=0.2 is unusual (ImageNet-style stds are ~0.22-0.23) —
# confirm this matches how the model was/will be evaluated.
train_transfrom = albumentations.Compose([
    albumentations.OneOf([
        albumentations.HorizontalFlip()
    ]),
    albumentations.Resize(224, 224),
    albumentations.Normalize(mean=[0.5, 0.5, 0.5], std=[0.2, 0.2, 0.2]),
    albumentations.pytorch.transforms.ToTensorV2()
])
dataset = CatDogDataset(transform=train_transfrom)
train_loader = DataLoader(
    dataset,
    batch_size=BATCH_SIZE,
    shuffle=True
)
model = MyModel()
model.to(device)
# Print a layer-by-layer summary for a 3x224x224 input.
summary_(model, (3, 224, 224), batch_size=128)
optimizer = AdamP(model.parameters(), lr=1e-3, weight_decay=0)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=2, eta_min=0.)
criterion = nn.CrossEntropyLoss()
for epoch in range(NUM_EPOCHS):
    model.train()
    loss_val = 0
    matches = 0
    for idx, train_batch in enumerate(train_loader):
        img, label = train_batch
        img = img.float().to(device)
        label = label.long().to(device)
        logit = model(img)
        loss = criterion(logit, label)
        loss_val += loss.item()
        # Predicted class = argmax over the class dimension.
        pred = torch.argmax(logit, -1)
        matches += (pred == label).sum().item()
        # NOTE(review): loss.item() is already the batch-mean loss, so
        # dividing by BATCH_SIZE again understates the printed loss by
        # a factor of 128; accuracy is also wrong for a partial final
        # batch — confirm intent. Both counters reset after the print,
        # so these are per-batch (not running) statistics.
        loss_val /= BATCH_SIZE
        matches /= BATCH_SIZE
        optimizer.zero_grad()
        print(f'Epoch : {epoch + 1}/{NUM_EPOCHS} ({idx + 1}/{len(train_loader)})\n'
              f'Loss : {loss_val:.4f}\n'
              f'Accuracy : {matches:.4f}\n'
              f'cat Label : {label.tolist().count(0)}\n'
              f'cat Pred : {pred.tolist().count(0)}\n'
              f'dog Label : {label.tolist().count(1)}\n'
              f'dog Pred : {pred.tolist().count(1)}\n')
        loss_val = 0
        matches = 0
        loss.backward()
        optimizer.step()
        # NOTE(review): stepping a CosineAnnealingLR(T_max=2) every batch
        # cycles the LR every 4 batches; per-epoch stepping is the usual
        # pattern — confirm this is intentional.
        scheduler.step()
# Persist only the weights (state_dict), not the full module.
torch.save(model.state_dict(), './checkpoint.pth')
| taeyang916/kaggle_fruits | train.py | train.py | py | 2,350 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "albumentations.Compose",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "albume... |
42493495025 | from __future__ import absolute_import, print_function, division
import os
from string import Template
import numpy as np
import theano
from theano import Apply
from theano.tensor import as_tensor_variable
from theano.tensor.sort import TopKOp
from .basic_ops import (GpuKernelBase, Kernel, infer_context_name,
as_gpuarray_variable, gpuarray_helper_inc_dir)
from .opt import register_opt, op_lifter, register_opt2
from .type import GpuArrayType
try:
import pygpu
import pygpu.gpuarray as ga
except ImportError as e:
# To make sure theano is importable
pass
# TODO GPU sort / argsort
class GpuTopKOp(GpuKernelBase, TopKOp):
'''Implements TopKOp on gpu
Currently the output seem sorted, but we do not test it. So as on
the CPU, we only support sorted=False for now.
'''
__props__ = TopKOp.__props__
_f16_ok = True
    def __init__(
            self, axis=-1,
            sorted=True,
            idx_dtype='int64',
            return_values=True,
            return_indices=True
    ):
        """Build the GPU top-k op.

        Parameters mirror ``TopKOp``. ``sorted=True`` is rejected up front
        because the GPU kernels do not guarantee ordered output (see the
        class docstring).
        """
        if sorted:
            raise NotImplementedError(
                "GpuTopK currently is not sure to give sorted output even if they look sorted..")
        GpuKernelBase.__init__(self)
        TopKOp.__init__(
            self, axis=axis,
            sorted=sorted,
            idx_dtype=idx_dtype,
            return_values=return_values,
            return_indices=return_indices)
    def perform(self, node, inputs, output_storage, params):
        # No Python fallback: this op only runs through the generated GPU
        # kernels (gpu_kernels / c_code).
        raise NotImplementedError()
    def c_headers(self):
        """Headers required by the generated C code."""
        return ['gpuarray_api.h', 'gpuarray_helper.h', 'numpy_compat.h']
    def c_header_dirs(self):
        """Include-search directories for the headers listed in c_headers()."""
        return [
            os.path.dirname(__file__),
            gpuarray_helper_inc_dir(),
            pygpu.get_include()]
    def c_code_cache_version(self):
        # Bump this when the generated C code changes so cached compiled
        # modules are invalidated and rebuilt.
        return (4,)
def gpu_kernels(self, node, nodename):
# load kernel source
device_type = node.inputs[0].type.context.kind
kernel_ext = {b'cuda': '.cu', b'opencl': '.cl'}[device_type]
common_ext = {b'cuda': '.cuh', b'opencl': '.h'}[device_type]
# prepare "$" macros
if device_type == b'cuda':
ndim = node.inputs[0].ndim
dstv_strides_code = ''.join('ssize_t dstv_strides_%d, ' % i for i in range(ndim))
dsti_strides_code = ''.join('ssize_t dsti_strides_%d, ' % i for i in range(ndim))
src_strides_code = ''.join('ssize_t src_strides_%d, ' % i for i in range(ndim))
set_slice_code = '''
gidx = gid %% dims_%(i)d;
gid /= dims_%(i)d;
{dstv};
{dsti};
src = ptr_add(src, gidx*src_strides_%(i)d);\n'''.format(
dstv='dstv = ptr_add(dstv, gidx*dstv_strides_%(i)d)' if self.return_values else '',
dsti='dsti = ptr_add(dsti, gidx*dsti_strides_%(i)d)' if self.return_indices else '')
set_slice_code = ''.join(
set_slice_code % dict(i=j) for j in range(1, ndim))
if self.return_values:
set_slice_code += """
dstv = ptr_add(dstv, dstv_offset);
"""
if self.return_indices:
set_slice_code += """
dsti = ptr_add(dsti, dsti_offset);
"""
set_slice_code += """
src = ptr_add(src, src_offset);
"""
flags = Kernel.get_flags(node.inputs[0].dtype)
subs = dict(
inp_t=ga.dtype_to_ctype(node.inputs[0].dtype),
out_t=ga.dtype_to_ctype(self.idx_dtype),
dims=''.join('size_t dims_%d, ' % i for i in range(1, ndim)),
dstv='INPUT_TYPE *dstv,' if self.return_values else '',
dstv_offset='size_t dstv_offset,' if self.return_values else '',
dsti='INDEX_TYPE *dsti,' if self.return_indices else '',
dsti_offset='size_t dsti_offset,' if self.return_indices else '',
dstv_strides=dstv_strides_code if self.return_values else '',
dsti_strides=dsti_strides_code if self.return_indices else '',
src_strides=src_strides_code,
set_slice=set_slice_code,
write_value=int(self.return_values),
write_index=int(self.return_indices),
ndim=str(ndim)
)
elif device_type == b'opencl':
raise NotImplementedError()
# setup parameters
param_types = [ga.SIZE] * (ndim - 1) # dims
for _ in range(self.return_values + self.return_indices):
param_types.append(ga.GpuArray) # dst*
param_types.append(ga.SIZE) # offset
param_types.extend([ga.SSIZE] * ndim) # dst*_strides
param_types.append(ga.SIZE) # k
param_types.append(ga.GpuArray) # src
param_types.append(ga.SIZE) # offset
param_types.extend([ga.SSIZE] * ndim) # src_strides
param_types.append(ga.SIZE) # size
# load and compile kernels
with open(os.path.join(
os.path.dirname(__file__), 'c_code', 'topk_common' + common_ext
)) as f:
common_src = f.read()
kernels = []
def build_kernel(fname, kname, subs):
with open(os.path.join(
os.path.dirname(__file__), 'c_code', fname)
) as f:
kernel_src = f.read()
ker = Kernel(
code=("#include <cluda.h>\n" +
Template(common_src + kernel_src).substitute(**subs)),
name=kname,
params=param_types,
flags=flags,
objvar=kname + nodename)
return ker
subs['count_t'] = 'int'
kernels.append(
build_kernel('topk_dense' + kernel_ext, 'k_topk_dense', subs))
subs['kname'] = 'k_topk_dense_large'
kernels.append(
build_kernel('topk_dense_large' + kernel_ext, 'k_topk_dense_large', subs))
subs['count_t'] = 'long long'
subs['kname'] = 'k_topk_dense_xlarge'
kernels.append(
build_kernel('topk_dense_large' + kernel_ext, 'k_topk_dense_xlarge', subs))
return kernels
def c_code(self, node, nodename, inps, outs, sub):
context = node.inputs[0].type.context
if context.kind != b'cuda':
raise NotImplementedError(
'%s: We only have CUDA '
'implementation so far.' % self.__class__.__name__)
x, k = inps
inp_dtc = ga.dtype_to_typecode(node.inputs[0].dtype)
if not self.return_indices:
yv, = outs
elif self.return_values:
yv, yi = outs
else:
yi, = outs
out_dtype_s = self.idx_dtype
out_dtc = ga.dtype_to_typecode(out_dtype_s)
fail = sub['fail']
ctx = sub['params']
k_dtype = node.inputs[1].type.dtype_specs()[1]
# max threads per block
MAX_TPB = context.maxlsize0
# max blocks per grid
MAX_BPG = context.maxgsize0
WARP_SIZE = 32
ndim = node.inputs[0].ndim
reordered_axes = list(range(ndim))
axis = self.axis % ndim
del(reordered_axes[axis])
reordered_axes = [axis] + reordered_axes
dims = ''.join('dims[%d], ' % i for i in reordered_axes[1:])
prep_output = ''
if self.return_values:
def_dvstrides = 'const ssize_t *dvstrides = PyGpuArray_STRIDES(%s)' % yv
params_dv = '%s->ga.data, %s->ga.offset,\n' % (yv, yv)
params_dv += ''.join('dvstrides[%d], ' % i for i in reordered_axes)
prep_output += '''
if (0 != theano_prep_output(
&%(yv)s, %(ndim)d, odims,
%(inp_dtc)s, GA_C_ORDER, %(ctx)s)) {
%(fail)s;
}\n''' % locals()
else:
def_dvstrides = params_dv = ''
if self.return_indices:
def_distrides = 'const ssize_t *distrides = PyGpuArray_STRIDES(%s)' % yi
params_di = '%s->ga.data, %s->ga.offset,\n' % (yi, yi)
params_di += ''.join('distrides[%d], ' % i for i in reordered_axes)
prep_output += '''
if (0 != theano_prep_output(
&%(yi)s, %(ndim)d, odims,
%(out_dtc)s, GA_C_ORDER, %(ctx)s)) {
%(fail)s;
}\n''' % locals()
else:
def_distrides = params_di = ''
sstrides = ', '.join('sstrides[%d]' % i for i in reordered_axes)
code = '''
{
const ssize_t k_ = ((%(k_dtype)s*)(PyArray_DATA(%(k)s)))[0];
const size_t *dims = PyGpuArray_DIMS(%(x)s);
size_t odims[%(ndim)d];
for (int i=0; i<%(ndim)d; i++)
odims[i] = dims[i];
odims[%(axis)d] = k_>=0 ? k_ : -k_;
if (0 == odims[%(axis)d]) {
PyErr_SetString(
PyExc_ValueError,
"topk: kth must not be zero");
%(fail)s;
} else if (dims[%(axis)d] < odims[%(axis)d]) {
PyErr_SetString(
PyExc_ValueError,
"topk: kth cannot be larger than the size of specified axis %(axis)d");
%(fail)s;
}
%(prep_output)s
size_t grid_size=1, block_size=1;
for (int i=0; i<%(ndim)d; ++i) {
if (i!=%(axis)d)
grid_size *= dims[i];
else
block_size = dims[i];
}
// round up to multiples of warp size
block_size = ((block_size + %(WARP_SIZE)d - 1) / %(WARP_SIZE)d) * %(WARP_SIZE)d;
if (grid_size > %(MAX_BPG)d) {
PyErr_SetString(
PyExc_ValueError,
"topk: too many slices to work with, expected <= %(MAX_BPG)d");
%(fail)s;
}
%(def_dvstrides)s;
%(def_distrides)s;
const ssize_t *sstrides = PyGpuArray_STRIDES(%(x)s);
int err;
if (dims[%(axis)d] > (1u << 31)) {
block_size = %(MAX_TPB)d;
err = k_topk_dense_xlarge_call(
1, &grid_size, &block_size, 0,
%(dims)s
%(params_dv)s
%(params_di)s
k_,
%(x)s->ga.data,
%(x)s->ga.offset,
%(sstrides)s,
dims[%(axis)d]
);
} else if (block_size > %(MAX_TPB)d) {
block_size = %(MAX_TPB)d;
err = k_topk_dense_large_call(
1, &grid_size, &block_size, 0,
%(dims)s
%(params_dv)s
%(params_di)s
k_,
%(x)s->ga.data,
%(x)s->ga.offset,
%(sstrides)s,
dims[%(axis)d]
);
} else {
err = k_topk_dense_call(
1, &grid_size, &block_size, 0,
%(dims)s
%(params_dv)s
%(params_di)s
k_,
%(x)s->ga.data,
%(x)s->ga.offset,
%(sstrides)s,
dims[%(axis)d]
);
}
if (err != GA_NO_ERROR) {
PyErr_SetString(
PyExc_RuntimeError,
"topk: gpu kernel failed to execute");
%(fail)s;
}
}
'''
return code % locals()
    def make_node(self, inp, kth):
        """Build the Apply node for (input tensor, k).

        Outputs are, in order and depending on the op's flags: a values
        tensor of the input's type, and/or an index tensor of ``idx_dtype``
        with the same broadcastable pattern, on the same GPU context.
        """
        ctx_name = infer_context_name(inp)
        inp = as_gpuarray_variable(inp, ctx_name)
        # k may arrive as a plain Python int / numpy scalar.
        kth = as_tensor_variable(kth)
        bcast = inp.type.broadcastable
        outs = []
        if self.return_values:
            outs.append(inp.type())
        if self.return_indices:
            outs.append(GpuArrayType(
                dtype=self.idx_dtype,
                broadcastable=bcast,
                context_name=ctx_name)())
        return Apply(self, [inp, kth], outs)
    def get_params(self, node):
        # The GPU context serves as the op's params; c_code receives it
        # through sub['params'] and uses it to allocate outputs.
        return node.inputs[0].type.context
class ValuesEqApproxNoOrder():
    """
    We ignore the order of elements on a given axis during the comparison.
    """
    def __init__(self, axis):
        self.axis = axis

    def __call__(self, val1, val2):
        # Sorting both operands along the axis removes any ordering
        # difference before the approximate comparison.
        sorted_a = np.sort(val1, axis=self.axis)
        sorted_b = np.sort(val2, axis=self.axis)
        return theano.tensor.type.values_eq_approx(sorted_a, sorted_b)
@register_opt('fast_compile')
@op_lifter([TopKOp], cuda_only=True)
@register_opt2([TopKOp], 'fast_compile')
def local_gpua_topkop(op, ctx_name, inputs, outputs):
    """Graph optimization: lift a CPU ``TopKOp`` to ``GpuTopKOp``.

    Returns None (no substitution) when the CPU op requests sorted output,
    which the GPU kernels do not support.
    """
    axis = op.axis
    rv = op.return_values
    ri = op.return_indices
    x, k = inputs
    x = as_gpuarray_variable(x, ctx_name)
    # Sorted output is unsupported on the GPU; leave the CPU op in place.
    if op.sorted:
        return
    gpu_op = GpuTopKOp(
        axis=axis,
        sorted=op.sorted,
        idx_dtype=op.idx_dtype,
        return_values=rv,
        return_indices=ri)
    rets = gpu_op(x, k, return_list=True)
    # The GPU op may return the k elements in a different order than the
    # CPU op, so tag the outputs with an order-insensitive comparator.
    c = ValuesEqApproxNoOrder(axis)
    for r in rets:
        r.tag.values_eq_approx = c
    return rets
| Theano/Theano | theano/gpuarray/sort.py | sort.py | py | 12,720 | python | en | code | 9,807 | github-code | 36 | [
{
"api_name": "basic_ops.GpuKernelBase",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "theano.tensor.sort.TopKOp",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "theano.tensor.sort.TopKOp.__props__",
"line_number": 33,
"usage_type": "attribute"
},... |
2364287539 | from PyQt5 import QtWidgets, uic,QtSql,QtCore
from rajon_1 import Ui_Dialog
import sqlite3 as sql
from vypocet import vypocty
from data import Databaze
from PyQt5.QtWidgets import QFileDialog
from cesta import path
import math
class Rajon(QtWidgets.QDialog,Ui_Dialog):
    """Modal dialog for computing a "rajon" (radial/polar survey point).

    Reads the station, orientation and measured-point identifiers from the
    dialog fields, pulls the matching rows from the project's SQLite
    database, computes the new point's coordinates and can save the point
    and write a computation report.
    """
    def __init__(self,cesta_projektu):
        # cesta_projektu: path to the project database used by all queries.
        super().__init__()
        self.setupUi(self)
        self.cesta=cesta_projektu
        self.vypocet.clicked.connect(self.vypocet_rajonu)
        self.uloz_bod.clicked.connect(self.ulozeni_rajonu)
        self.protokol.clicked.connect(self.protokol_rajon)
        self.show()
        self.exec()
    def vypocet_rajonu(self):
        """Compute the rajon point from the values typed in the dialog."""
        print("*******************************************")
        print("Vypocet rajonu")
        # Read the point identifiers from the dialog's text fields.
        self.stan = self.stanovisko.toPlainText()
        self.ori = self.orientace.toPlainText()
        self.bod = self.merenybod.toPlainText()
        # Pull the needed rows from the project database.
        # NOTE(review): queries are built via str.format — vulnerable to SQL
        # injection / broken by quotes in the input; parameterized queries
        # would be safer (left unchanged here).
        query_stanovisko = 'select * from gps_sour where CB is " {} "'.format(self.stan)
        query_orientace = 'select * from gps_sour where CB is " {} "'.format(self.ori)
        query_bod = 'select * from mereni where Orientace is " {} " and Stanovisko is " {} "'.format(self.bod,self.stan)
        query_mereni_orientace = 'select * from mereni where Orientace is " {} " and Stanovisko is " {} "'.format(self.ori,self.stan)
        self.stanovisko_data = Databaze.sql_query(self.cesta,query_stanovisko)
        self.orientace_data = Databaze.sql_query(self.cesta,query_orientace)
        self.bod_data = Databaze.sql_query(self.cesta,query_bod)
        self.mereni_orientace = Databaze.sql_query(self.cesta,query_mereni_orientace)
        try:
            # Build the input dictionaries expected by vypocty.rajon.
            b_ori={5001:{'x': self.orientace_data[0][3], 'y': self.orientace_data[0][2], 'smer': self.mereni_orientace[0][5]}}
            b_sta={'X': self.stanovisko_data[0][3], 'Y': self.stanovisko_data[0][2]}
            # Slope distance reduced by sin(zenith); *pi/200 suggests the
            # angle is stored in gons — TODO confirm.
            b_mer={5003:{'delka': self.bod_data[0][3]*math.sin(self.bod_data[0][4]*math.pi/200), 'smer': self.bod_data[0][5]}}
            # Compute the new point.
            rajon=vypocty.rajon(b_ori,b_sta,b_mer)
            # Round and display the resulting coordinates.
            self.souradniceX=vypocty.zaokrouhleni(rajon[5003]['x'],3)
            self.souradniceY=vypocty.zaokrouhleni(rajon[5003]['y'],3)
            self.X.setText(str(self.souradniceX))
            self.Y.setText(str(self.souradniceY))
        except IndexError:
            # One of the queries returned no rows.
            print("Data nejsou soucasti projektu!!")
    def ulozeni_rajonu(self):
        """Save the computed point's coordinates into the database."""
        try:
            query='insert into gps_sour (CB, Y, X, kod) values (" {} ", {}, {}," {} ")'.format(self.bod, self.souradniceY, self.souradniceX, self.bod_data[0][6])
            Databaze.pridani_bodu(self.cesta, query)
            print("Bod ulozen!!")
        except IndexError:
            # No computed point is available yet.
            print("Bod neni spocitan!!")
    def protokol_rajon(self):
        """Append a computation report to a user-chosen file."""
        cesta=QFileDialog.getSaveFileUrl()
        cesta=cesta[0].toString()
        # Strip the URL scheme; assumes a local "file:///" URL (8 chars) —
        # TODO confirm on all platforms.
        cesta=cesta[8:]
        try:
            # Write the report; AttributeError fires when no computation
            # has been run (self.stan etc. do not exist yet).
            protokol = open(cesta,'a')
            protokol.write("************************* \n")
            protokol.write("Vypocet rajonu \n")
            protokol.write("Stanovisko: {} \n".format(self.stan))
            protokol.write("Orientace: {} \n".format(self.ori))
            protokol.write("Vysledny bod: \n")
            protokol.write("CB: {} \n".format(self.bod))
            protokol.write("Souradnice X: {} \n".format(str(self.souradniceX)))
            protokol.write("Souradnice Y: {} \n".format(str(self.souradniceY)))
            protokol.write("************************* \n")
            protokol.close()
            print("Protokol ulozen!!")
        except AttributeError:
            print("Uloha neni spocitana!!")
if __name__ == "__main__":
    app=QtWidgets.QApplication([])
    # NOTE(review): Rajon.__init__ requires cesta_projektu, but this call
    # passes no argument — running the module standalone raises TypeError.
    # Confirm whether this entry point is still used.
    okno=Rajon()
    okno.show()
    app.exec()
| ctu-yobp/2020-a | app/rajon_2.py | rajon_2.py | py | 4,041 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "rajon_1.Ui_Dialog",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "data.D... |
33390403216 | from django.shortcuts import redirect, render
from animalprofile.models import Animal, Kind
from userprofile.models import UserAccount
from .forms import SearchAnimalsForm
def index(request):
    """Render the adoptable-animals listing, optionally filtered by a search form.

    GET shows all animals still looking for an owner; POST applies the
    SearchAnimalsForm filters. For authenticated users each animal is
    annotated with whether it is in the user's favorites.
    """
    # Default queryset: every animal without an owner. Defined up-front so
    # the view no longer raises NameError when the POST form is invalid or
    # the search itself fails (bug in the original).
    animals = Animal.objects.filter(isOwnerFound=False)
    if request.method == 'POST':
        form = SearchAnimalsForm(request.POST)
        if form.is_valid():
            try:
                params_search = dict()
                req_post = request.POST.dict()
                for field, value in form.cleaned_data.items():
                    # Age is handled separately below; "not specified" fields
                    # are skipped entirely.
                    if field == 'age': continue
                    if str(value) == 'не указано': continue
                    params_search[field] = req_post[field]
                params_search['age__lte'] = int(req_post['age'])
                params_search['isOwnerFound'] = False
                animals = Animal.objects.filter(**params_search)
            except Exception:
                # Was a bare "except:"; narrowed and the default queryset
                # above keeps the page renderable.
                form.add_error(None, 'Ошибка поиска')
    else:
        form = SearchAnimalsForm()
    count_animals = len(animals)
    user = request.user
    if user.is_authenticated:
        user_account = user.useraccount
        animal_list = list()
        for animal in animals:
            animal_list.append({
                'id': animal.id,
                'name': animal.name,
                'photo': animal.photo,
                'isOwnerFound': animal.isOwnerFound,
                'isFavorite': user_account in animal.inFavorites.all()
            })
    else:
        user_account = None
        animal_list = animals
    return render(
        request,
        'animalprofiles/index.pug',
        {
            'animals': animal_list,
            'user_account': user_account,
            'count_animals': count_animals,
            'form': form
        }
    )
| manulovich/zoo-friend | zoofriends/animalprofiles/views.py | views.py | py | 1,839 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "forms.SearchAnimalsForm",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "animalprofile.models.Animal.objects.filter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "animalprofile.models.Animal.objects",
"line_number": 23,
"usage_type": ... |
17079399924 | from operator import itemgetter
from typing import Dict, Iterable, List, Optional, Tuple
import typer
from lib.legiscan import BillDescriptor, download_and_extract
from lib.util import load_json
def wrangle_metadata(metadata: Dict) -> Tuple[BillDescriptor, Optional[str]]:
    """Extract the descriptor and latest doc_id needed to download a bill's text."""
    state, bill_number, bill_id = itemgetter('state', 'bill_number', 'bill_id')(metadata)
    descriptor = BillDescriptor(state, bill_number, bill_id)
    texts = metadata['texts']
    if texts:
        # Most recently dated text version wins; on date ties max() keeps the
        # first occurrence, matching the original stable reverse sort.
        doc_id = max(texts, key=itemgetter('date'))['doc_id']
    else:
        doc_id = None
    return descriptor, doc_id
def retrieve_legislation(
    metadata_files: Iterable[str],
    output_path: str,
) -> List[str]:
    """Retrieve the contents of the legislation named by each metadata file.

    Bills without any published text (no doc_id) are skipped.
    """
    retrieved: List[str] = []
    for metadata_filename in metadata_files:
        descriptor, doc_id = wrangle_metadata(load_json(metadata_filename)['bill'])
        if not doc_id:
            continue
        retrieved.append(download_and_extract(descriptor, doc_id, output_path))
    return retrieved
def main(
    metadata_files: List[str],
    output_path: str,
):
    """CLI entry point: download the text for each metadata file into *output_path*."""
    retrieve_legislation(metadata_files, output_path)


# Expose the task as a Typer command-line app when run directly.
if __name__ == "__main__":
    typer.run(main)
| amy-langley/tracking-trans-hate-bills | lib/tasks/legiscan/retrieve_legislation.py | retrieve_legislation.py | py | 1,373 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "typing.Dict",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "lib.legiscan.BillDescriptor",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "typing.Tup... |
8567322626 | # coding: utf-8
#from PyInstaller.utils.hooks import copy_metadata, collect_data_files
#datas = copy_metadata('google-api-python-client')
#datas += collect_data_files('googleapiclient.discovery')
#datas += collect_data_files('PyInstaller.utils.hooks')
import os, sys, re
import pandas as PD
from datetime import datetime,timedelta
from configparser import ConfigParser
from MyPack import *
_R = None
_notify = True
_tk = ''
_id = ''
def isinduty (A):
    """Return True when the quarantine note *A* is still in effect.

    A note containing "MM/DD集" marks the last day of centralized quarantine;
    the entry counts as in-duty while that day (month/day only) is today or
    later. Notes without the pattern are treated as still in duty.
    """
    # [01]?[0-9]/[0-3]?[0-9] — month/day with optional leading digit, followed
    # by the marker 集. The original class "[0|1]" also matched a literal '|',
    # which would feed garbage into strptime; fixed to "[01]".
    m = re.search(r'[01]?[0-9]/[0-3]?[0-9]集', A)
    if m is None:
        # No end-date annotation: assume the person is still in quarantine.
        return True
    lastday = m[0].split('集')[0]
    today = datetime.now().strftime('%m/%d')
    # Both sides parse into the same default year, so this compares
    # month/day only, as the original did.
    return datetime.strptime(lastday, '%m/%d') >= datetime.strptime(today, '%m/%d')
def fun_classify(qData):
    """Split the quarantine roster *qData* into classification categories.

    Each row is matched, in priority order, against name/remark/address
    patterns; matched rows get a 'classify' label and are removed from the
    working frame *Others* so every row lands in exactly one category.
    Returns the concatenation *Z* of all labelled rows.

    NOTE(review): if the very first try-block raises, Z is never bound and
    every later PD.concat([Z, ...]) falls into its except arm, so return Z
    raises NameError — verify this path cannot occur in practice.
    """
    global _R
    #Others = qData.filter(items=['Seq','IdNo','OtherIdNo','Name','CellPhone','Addr','Remark','ErrorType'])
    Others = qData.copy()
    Others['classify'] = ''
    # --- data-error rows: bad address with a 4-digit town code, or missing
    # --- phone/address (unless centralized quarantine / PH nationality)
    try :
        de2 = Others[(Others['TownCode'].str.contains('^[0-9]{4}$',na = False)
                      & Others['ErrorType'].str.contains('地址異常',na = False)
                      #& ~(Others['CellPhone']=='0900000000')
                      )]
        Others = Others[~(Others['TownCode'].str.contains('^[0-9]{4}$',na = False)
                          & Others['ErrorType'].str.contains('地址異常',na = False)
                          #& ~(Others['CellPhone']=='0900000000')
                          )]
        de2['classify'] = 'DC程式錯誤'
        de3 = Others[(Others['CellPhone'].isnull() | Others['Addr'].isnull())
                     &~(Others['Name'].str.contains('集中檢疫',na = False)|Others['Nationality'].str.contains('PH',na = False))]
        Others = Others[~((Others['CellPhone'].isnull() | Others['Addr'].isnull())
                          &~(Others['Name'].str.contains('集中檢疫',na = False)|Others['Nationality'].str.contains('PH',na = False)))]
        de3['classify'] = '部分確認'
        dataError = PD.concat([de2,de3])
        Z = dataError.copy()
    except :
        dataError = []
    #=================================================================
    # --- not yet arrived in the country
    try:
        de1 = Others[Others['Name'].str.contains('未入境',na = False)]
        Others = Others[~(Others['Name'].str.contains('未入境',na = False))]
        de1['classify'] = '未入境'
        Z = PD.concat([Z,de1])
    except:
        pass
    # --- currently serving a sentence (pattern comes from config: _R)
    try :
        be_punish = Others[Others['Name'].str.contains(_R['be_punish'],na = False)]
        Others = Others[~Others['Name'].str.contains(_R['be_punish'],na = False)]
        be_punish['classify'] = '服刑中'
        Z = PD.concat([Z,be_punish])
    except :
        be_punish = []
    # --- centralized quarantine candidates; each row is re-checked with
    # --- isinduty() and keyword tests before being labelled
    try :
        A2N = Others[Others['CellPhone'].isnull() |Others['Name'].str.contains('集中檢疫|特定.*檢疫',na = False)| Others['Remark'].str.contains('強制',na = False)]
        Others = Others[~(Others['CellPhone'].isnull() |Others['Name'].str.contains('集中檢疫|特定.*檢疫',na = False)| Others['Remark'].str.contains('強制',na = False))]
        for _A2 in A2N.iterrows():
            try :
                if isinduty(_A2[1]['Name']):
                    _A2[1]['classify'] = '集中檢疫'
                    A2N.update(_A2)
                elif _A2[1]['Name'].find('安置') != -1:
                    _A2[1]['classify'] = '集中檢疫'
                    A2N.update(_A2)
                elif _A2[1]['Name'].find('違規') != -1:
                    _A2[1]['classify'] = '集中檢疫'
                    A2N.update(_A2)
                elif _A2[1]['Name'].find('移工') != -1:
                    _A2[1]['classify'] = '集中檢疫'
                    A2N.update(_A2)
                elif _A2[1]['Name'].find('(集中檢疫)') != -1:
                    _A2[1]['classify'] = '集中檢疫'
                    A2N.update(_A2)
                elif _A2[1]['Name'].find('(集中檢疫)') != -1:
                    _A2[1]['classify'] = '集中檢疫'
                    A2N.update(_A2)
                elif _A2[1]['Nationality'] == 'PH':
                    _A2[1]['classify'] = '集中檢疫'
                    A2N.update(_A2)
                elif _A2[1]['Remark'].find('強制') != -1:
                    _A2[1]['classify'] = '集中檢疫'
                    A2N.update(_A2)
            except:
                continue
        A2 = A2N[A2N['classify'].str.contains('集中檢疫')]
        # unmatched candidates go back into the working frame
        Others = PD.concat([Others,A2N[~A2N['classify'].str.contains('集中檢疫',na = False)]])
        Z = PD.concat([Z,A2])
    except :
        A2 = []
    # --- transferred to isolation (pattern from config)
    try :
        A11 = Others[Others['Name'].str.contains(_R['to_ISO'],na = False)|Others['Remark'].str.contains(_R['to_ISO'],na = False)]
        Others = Others[~(Others['Name'].str.contains(_R['to_ISO'],na = False)|Others['Remark'].str.contains(_R['to_ISO'],na = False))]
        A11['classify'] = '轉隔離'
        Z = PD.concat([Z,A11])
    except :
        A11 = []
    # --- temporarily out (funerals, medical visits, testing) but not admitted
    try :
        outgoing = Others[Others['Remark'].str.contains('奔喪|外出|探病|就診|就醫|採檢|篩檢',na = False) & ~Others['Remark'].str.contains('入住|住院|治療|違規',na = False) & ~Others['Name'].str.contains('住院',na = False)]
        Others = Others[~(Others['Remark'].str.contains('奔喪|外出|探病|就診|就醫|採檢|篩檢',na = False) & ~Others['Remark'].str.contains('入住|住院|治療|違規',na = False)& ~Others['Name'].str.contains('住院',na = False))]
        outgoing['classify'] = '暫時解列'
        Z = PD.concat([Z,outgoing])
    except :
        outgoing = []
    # --- hospitalized
    try :
        in_hospital = Others[Others['Remark'].str.contains('醫院|病房|治療|入住|住院|開刀|手術',na = False) | Others['Name'].str.contains('住院|醫院',na = False)]
        Others = Others[~(Others['Remark'].str.contains('醫院|病房|治療|入住|住院|開刀|手術',na = False) | Others['Name'].str.contains('住院|醫院',na = False))]
        in_hospital['classify'] = '住院中'
        Z = PD.concat([Z,in_hospital])
    except :
        outgoing = []
        in_hospital = []
    # --- foreign students (remaining rows with a 4-digit town code)
    try:
        foreign_Student = Others[(Others['TownCode'].str.contains('^[0-9]{4}$',na = False))]
        Others = Others[~(Others['TownCode'].str.contains('^[0-9]{4}$',na = False))]
        foreign_Student['classify'] = '外籍學生'
        Z = PD.concat([Z,foreign_Student])
    except:
        foreign_Student = []
    # --- crew quarantined on board; split by domestic vs foreign ID format
    try :
        D1 = Others[Others['Remark'].str.contains('船上聯絡人|籍、船員|漁工',na = False) | Others['Addr'].str.contains('原船檢疫|船號|船員',na = False)]
        onboard_native = D1[D1['IdNo'].str.contains('[A-Z][1|2][0-9]{8}',na = False)]
        onboard_aboard = D1[~D1['IdNo'].str.contains('[A-Z][1|2][0-9]{8}',na = False)]
        Others = Others[~(Others['Remark'].str.contains('船上聯絡人|籍、船員|漁工',na = False) | (Others['Addr'].str.contains('原船檢疫|船號|船員',na = False)))]
        onboard_native['classify'] = '本籍船員原船檢疫'
        onboard_aboard['classify'] = '外籍船員原船檢疫'
        #print (D2.filter(['Seq','IdNo','OtherIdNo','Remark']))
        Z = PD.concat([Z,onboard_native])
        Z = PD.concat([Z,onboard_aboard])
    except:
        onboard_native = []
        onboard_aboard = []
    # --- children / minors sharing a phone
    try :
        achild = Others[Others['Remark'].str.contains('幼童|幼兒|嬰兒|未成年|小朋友|小孩|共用|[0|1]?[0-9]歲',na = False) | Others['Name'].str.contains('嬰兒|未成年|小孩|[0|1]?[0-9]歲',na = False)]
        Others = Others[~(Others['Remark'].str.contains('幼童|幼兒|嬰兒|未成年|小朋友|小孩|共用|[0|1]?[0-9]歲',na = False) | Others['Name'].str.contains('嬰兒|未成年|小孩|[0|1]?[0-9]歲',na = False))]
        achild['classify'] = '兒童'
        Z = PD.concat([Z,achild])
    except:
        achild = []
    # --- quarantine-phone stock exhausted
    try :
        no_mobile = Others[(Others['Remark'].str.contains('發罄|發謦|發磬|手機已罄',na = False))|(Others['Addr'].str.contains('發罄|發謦|發磬|手機已罄',na = False))|(Others['Name'].str.contains('發罄|發謦|發磬|手機已罄',na = False))]
        Others = Others[~((Others['Remark'].str.contains('發罄|發謦|發磬|手機已罄',na = False))|(Others['Addr'].str.contains('發罄|發謦|發磬|手機已罄',na = False))|(Others['Name'].str.contains('發罄|發謦|發磬|手機已罄',na = False)))]
        no_mobile['classify'] = '手機發罄'
        Z = PD.concat([Z,no_mobile])
    except:
        no_mobile = []
    # --- quarantine phone requested / pending delivery
    try :
        wait_mobile = Others[Others['Remark'].str.contains('待送|待發|發防疫手機|需要防疫手機|申請防疫手機',na = False)|(Others['Name'].str.contains('防疫手機',na = False))]
        Others = Others[~(Others['Remark'].str.contains('待送|待發|發防疫手機|需要防疫手機|申請防疫手機',na = False)|(Others['Name'].str.contains('防疫手機',na = False)))]
        wait_mobile['classify'] = '待發手機'
        Z = PD.concat([Z,wait_mobile])
    except:
        wait_mobile = []
    # --- poor signal / handset trouble
    try :
        no_signal = Others[Others['Name'].str.contains('訊號|收訊|手機異常',na = False)|Others['Remark'].str.contains('訊號|收訊|手機異常',na = False)]
        Others= Others[~(Others['Name'].str.contains('訊號|收訊|手機異常',na = False)|Others['Remark'].str.contains('訊號|收訊|手機異常',na = False))]
        no_signal['classify'] = '訊號不良'
        Z = PD.concat([Z,no_signal])
    except:
        no_signal = []
    # --- migrant-worker broker / employer contacts
    try :
        wait_employ = Others[Others['Remark'].str.contains('移工|雇主|僱主|仲介',na = False)|(Others['Name'].str.contains('公司',na = False))]
        Others = Others[~(Others['Remark'].str.contains('移工|雇主|僱主|仲介',na = False)|(Others['Name'].str.contains('公司',na = False)))]
        wait_employ['classify'] = '仲介雇主'
        Z = PD.concat([Z,wait_employ])
    except :
        wait_employ = []
    # --- business travellers (visit purpose code '1')
    try :
        Business = Others[(Others['VisitPurpose'] == '1')]
        Others = Others[~(Others['VisitPurpose'] == '1')]
        Business['classify'] = '商務人士'
        Z = PD.concat([Z,Business])
    except :
        Business = []
    # --- quarantine hotels
    try :
        in_rest = Others[Others['Remark'].str.contains('防疫旅宿|防疫旅館|飯店',na = False)]
        Others = Others[~Others['Remark'].str.contains('防疫旅宿|防疫旅館|飯店',na = False)]
        in_rest['classify'] = '防疫旅館'
        Z = PD.concat([Z,in_rest])
    except :
        in_rest = []
    # --- everything left needs manual correction
    try :
        wait_modify = Others
        wait_modify['classify'] = '待修正'
        Z = PD.concat([Z,wait_modify])
    except :
        wait_modify = []
    return Z
def __main__():
    """Run the daily quarantine-classification job.

    Reads config (SFTP credentials are encrypted on first run), pulls the
    current and previous QUA/ISO roster files over SFTP, classifies the
    abnormal rows with fun_classify(), uploads the result, writes count
    summaries to a Google Sheet and sends Telegram notifications.
    """
    global _R
    global _tk
    global _id
    # Config file path: first CLI argument, or QUA_Classify.ini next to the script.
    ConfigFile = sys.argv[1] if len(sys.argv) > 1 else (os.path.dirname(sys.argv[0])+'\\QUA_Classify.ini')
    #print (ConfigFile)
    myCrypt = MyCryp()
    Config = ConfigParser(allow_no_value=True)
    Config.optionxform = str
    Config.read(ConfigFile,encoding='utf-8')
    _R = Config['RE']
    _tk = Config['Notify']['Telegram Token']
    _id = Config['Notify']['Chat ID']
    saveto = Config['PATH']['saveto']
    quapath = Config['PATH']['quapath']
    isopath = Config['PATH']['isopath']
    if Config.getboolean('SFTP','encrypt'):
        un = myCrypt.decryp(Config['SFTP']['username'])
        pw = myCrypt.decryp(Config['SFTP']['password'])
        ip = myCrypt.decryp(Config['SFTP']['ip'])
    else:
        # First run: credentials are plaintext — read them, then write them
        # back encrypted and flip the flag.
        un = Config['SFTP']['username']
        pw = Config['SFTP']['password']
        ip = Config['SFTP']['ip']
        Config['SFTP']['username'] = myCrypt.encryp(un)
        Config['SFTP']['password'] = myCrypt.encryp(pw)
        Config['SFTP']['ip'] = myCrypt.encryp(ip)
        Config['SFTP']['encrypt'] = 'True'
    A = SFTP(un,pw,ip)
    # Two daily batches: 15h (afternoon) and 03h (morning); "yt" is the
    # timestamp of the previous batch used to detect newly added rows.
    t11 = datetime.now().strftime('%Y/%m/%d')
    if int(datetime.now().strftime('%H')) > 15 :
        t = datetime.now().strftime('%Y%m%d15')
        t1 = datetime.now().strftime(u'%m月%d日15時')
        ta = datetime.now().strftime('%Y%m%d15')
        #tta = datetime.now().strftime('%Y%m%d16')
        yt = datetime.now().strftime('%Y%m%d03')
        tmpmsg = '下午'
    else:
        t = datetime.now().strftime('%Y%m%d03')
        t1 = datetime.now().strftime(u'%m月%d日03時')
        ta = datetime.now().strftime('%Y%m%d03')
        yt = datetime.strftime(datetime.now() - timedelta(1), '%Y%m%d15')
        #yt = datetime.strftime(datetime.now() - timedelta(1), '%Y%m%d16')
        tmpmsg = '今早'
    # Filename patterns come from the [RE] config section.
    p = _R['FileQUA']
    p1 = _R['FileCountQUA'].format(ta,'{1,5}')
    p2 = _R['FileCountISO'].format(ta,'{1,5}')
    #====QUA================================
    pQSearch = re.compile(p.format('QUARANTINE','ABNORMAL_',t,'{1,5}'))
    pISearch = re.compile(p.format('ISOLATION','ABNORMAL',t,'{1,5}'))
    p1QSearch = re.compile(p1)
    p2ISearch = re.compile(p2)
    pyQSearch = re.compile(p.format('QUARANTINE','ABNORMAL_',yt,'{1,5}'))
    pyISearch = re.compile(p.format('ISOLATION','ABNORMAL',yt,'{1,5}'))
    #print (pQSearch)
    # --- ISO side: totals, previous batch, current batch -------------------
    A.ChangeDir(isopath)
    try :
        with A.getFile (p2ISearch) as qFile2 :
            if qFile2 is not None:
                sData = PD.read_csv(qFile2,header = 0,dtype = str)
                itotal,iefence,iabnormal = sData['Totalnum'][0],sData['efencenum'][0],sData['unoffernum'][0]
            else:
                itotal,iefence,iabnormal = '0','0','0'
    except :
        raise Exception("Cannot find ISO total count file.")
    try:
        with A.getFile (pyISearch) as iyFile :
            yData = PD.read_csv(iyFile,header = 0,dtype = str)
            ySeq = yData['Seq']
    except :
        raise Exception("Cannot find last ISO file.")
    try:
        with A.getFile( pISearch) as iFile:
            iData = PD.read_csv(iFile,header = 0,dtype = str,encoding= 'utf-8')
            NewI = iData.query("Seq not in @ySeq")
            nNewI = str(len(NewI))
    except:
        raise Exception("Cannot find ISO file.")
    # --- QUA side: totals, previous batch, current batch -------------------
    A.ChangeDir(quapath)
    try :
        with A.getFile (p1QSearch) as qFile1 :
            if qFile1 is not None:
                sData = PD.read_csv(qFile1,header = 0,dtype = int)
                total,efence,abnormal = sData['Totalnum'][0],sData['efencenum'][0],sData['unoffernum'][0]
            else:
                total,efence,abnormal = 0,0,0
    except :
        raise Exception("Cannot find QUA total count file.")
    try:
        with A.getFile (pyQSearch) as qyFile :
            yData = PD.read_csv(qyFile,header = 0,dtype = str)
            ySeq = yData['Seq']
    except :
        raise Exception("Cannot find last QUA file.")
    try:
        with A.getFile( pQSearch) as qFile:
            qData = PD.read_csv(qFile,header = 0,dtype = str,encoding= 'utf-8')
            NewQ = qData.query("Seq not in @ySeq")
            nNewQ = str(len(NewQ))
    except:
        raise Exception("Cannot find QUA file.")
    # Classify the abnormal rows and separate the data-error categories.
    Z = fun_classify(qData)
    dataError = PD.concat([Z.query("classify == 'DC程式錯誤'") , Z.query("classify == '部分確認'")])
    Others = Z.query("(classify != '部分確認') and (classify != 'DC程式錯誤')")
    len_dataError = len(dataError)
    if len(dataError) != 0:
        # Recompute the headline numbers without the data-error rows.
        NewQ = Others.query("Seq not in @ySeq")
        nNewQ = str(len(NewQ))
        total = total - len_dataError
        abnormal = abnormal - len_dataError
    # Export the classified roster locally and upload it back over SFTP.
    remoteFilename = 'QUA_{}.xlsx'.format(t)
    localFilename = saveto+'\\{}'.format(remoteFilename)
    Z.to_excel(localFilename,sheet_name = 'ALL')
    A.ChangeDir(quapath+'/ABNORMAL_Classified')
    A.putFile(localFilename,remoteFilename)
    ZZ = Z.filter(items=['classify','Nationality','VisitPurpose','Name','Remark','Addr','ErrorType','TownCode','CellPhone','Seq','IdNo','OtherIdNo'])
    ZZ.to_excel(saveto+'\\QUA_{}_Classify_Check.xlsx'.format(t),sheet_name = 'ALL')
    dataError.to_excel(saveto+'\\QUA_{}_dataError_{}.xlsx'.format(t,len_dataError),sheet_name = 'ALL')
    # Push the per-category counts to the Google Sheet.
    qgs = GSheet(Config['GSheet']['ID'],Config['GSheet']['auth_json'])
    ranges = u'QUA_1!B1:B13'
    ranges1 = u'QUA_1!B15:B22'
    ranges2 = u'QUA_1!B25'
    values = [[t1]
              ,[str(total)]
              ,[str(abnormal)]
              ,[str(efence)]
              ,[str(len(Z.query("classify == u'手機發罄'")))]
              ,[str(len(Z.query("classify == u'待發手機'")))]
              ,[str(len(Z.query("classify == u'仲介雇主'")))]
              ,[str(len(Z.query("classify == u'商務人士'")))]
              ,[str(len(Z.query("classify == u'兒童'")))]
              ,[str(len(Z.query("classify == u'外籍學生'")))]
              ,[str(len(Z.query("classify == u'防疫旅館'")))]
              ,[str(len(Z.query("classify == u'待修正'")))]
              ,[str(len(Z.query("classify == u'未入境'")))]
              ,]
    values1 =[[str(len(Z.query("classify == u'訊號不良'")))]
              ,[str(len(Z.query("classify == u'外籍船員原船檢疫'")))]
              ,[str(len(Z.query("classify == u'本籍船員原船檢疫'")))]
              ,[str(len(Z.query("classify == u'服刑中'")))]
              ,[str(len(Z.query("classify == u'住院中'")))]
              ,[str(len(Z.query("classify == u'集中檢疫'")))]
              ,[str(len(Z.query("classify == u'轉隔離'")))]
              ,[str(len(Z.query("classify == u'暫時解列'")))]
              ,]
    values2 = [[nNewQ,],]
    dd = { 'value_input_option': 'USER_ENTERED',
           'data':[{ 'range': ranges,
                     'majorDimension': "ROWS",
                     'values': values
                   },
                   { 'range':ranges1,
                     'majorDimension': "ROWS",
                     'values': values1
                   },
                   { 'range':ranges2,
                     'majorDimension': "ROWS",
                     'values': values2
                   },],
         }
    ranges3 = u'QUA!A1:D1'
    ranges4 = u'ISO!A1:D1'
    values3 =[[t11,str(total),str(efence),str(abnormal), ], ]
    values4 =[[t11,str(itotal),str(iefence),str(iabnormal), ], ]
    #print (dd)
    #'''
    qgs.batchupdate(dd)
    # Append a snapshot row; daily totals only once per day (morning batch).
    tmpData = qgs.get('QUA_1!B1:B25','COLUMNS')
    qgs.append('QUA_ROWS!A1:X',{'values':tmpData['values']})
    if (tmpmsg == '今早'):
        qgs.append(ranges3,{'values':values3})
        qgs.append(ranges4,{'values':values4})
    # Record the finished batch (and possibly the newly encrypted credentials).
    Config['Work']['last done'] = t1
    Config.write(open (ConfigFile,'w+',encoding='utf-8'))
    if _notify == True:
        T = Telegram(_tk,_id)
        msg = ''
        msg += '{}應管{},送出{},未送出{} #居檢統計\n'.format (t1,str(total),str(efence),str(abnormal))
        msg += '{}應管{},送出{},未送出{} #隔離統計\n'.format (t1,itotal,iefence,iabnormal)
        msg += '{} 本次居檢新增: {}, 資料錯誤: {}(未入境: {}) #居檢分析'.format(t1,nNewQ,len_dataError,len(Z.query("classify == u'未入境'")))
        T.notify(msg)
        msg = '報告,{}未送出給圍籬計 {} 筆,資料分類統計如下:'.format(tmpmsg,str(abnormal))
        T.notify(msg)
# Entry point: run the classification job; any fatal error is reported
# through the Telegram notifier configured at module level.
if __name__ == '__main__' :
    try:
        __main__()
    except Exception as e:
        T = Telegram(_tk,_id)
        msg = '{}'.format(str(e))
        T.notify(msg)
{
"api_name": "re.compile",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "datetime.date... |
17778652132 | from PyQt4.QtGui import *
from PyQt4.QtCore import *
import matplotlib.pyplot as plt
import matplotlib.backends.backend_qt4agg
import ReportDBOps as db
class ReportLogs(QWidget):
    """Widget that lets the user pick a stored report and view its
    threat-category breakdown as a pie chart."""

    def __init__(self):
        super(ReportLogs, self).__init__()
        # Main layout
        self.mainGridLayout = QGridLayout()
        # Controls: report selector plus Show/Refresh actions.
        self.lblReport = QLabel('Report')
        self.comboReport = QComboBox()
        self.btnShowReport = QPushButton('Show')
        self.btnRefreshReportList = QPushButton('Refresh')
        # Listeners
        self.btnRefreshReportList.clicked.connect(self.loadReportList)
        self.btnShowReport.clicked.connect(self.loadReport)
        # Row 0: label, combo, buttons; the chart canvas is added later at
        # layout position 4 (row 1) by loadReport().
        self.mainGridLayout.addWidget(self.lblReport, 0, 0)
        self.mainGridLayout.addWidget(self.comboReport, 0, 1)
        self.mainGridLayout.addWidget(self.btnShowReport, 0, 2)
        self.mainGridLayout.addWidget(self.btnRefreshReportList, 0, 3)
        # Set the main layout
        self.setLayout(self.mainGridLayout)
        self.loadReportList()

    def loadReportList(self):
        """Reload the report names from the database into the combo box."""
        self.comboReport.clear()
        self.comboReport.addItems(db.getUniqueReports())

    def loadReport(self):
        """Render the selected report's counts as a pie chart in the layout."""
        if self.comboReport.currentText() == '':
            return
        # Remove the previously shown canvas (layout slot 4), if any.
        # Replaces the original bare "except: print('L')" with an explicit
        # None check so real errors are no longer swallowed.
        previous = self.mainGridLayout.itemAt(4)
        if previous is not None and previous.widget() is not None:
            previous.widget().deleteLater()
        self.drawPlot()
        # counts = [genCount, suspCount, threatCount, attackCount]
        counts = db.getReportCounts(str(self.comboReport.currentText()))
        axis = plt.subplot2grid((6, 6), (0, 0), rowspan=6, colspan=6)
        axis.pie([counts[3], counts[2], counts[1], counts[0]], colors=['r', 'y', 'c', 'g'], startangle=90, shadow=False, explode=[0.2, 0, 0, 0])
        axis.set_title('Regular Vs Suspicious Vs Insider-Threat Vs Insider-Attack')
        axis.legend(['%s Insider Attack' % str(counts[3]), '%s Insider Threat' % str(counts[2]), '%s Suspicious' % str(counts[1]), '%s Regular' % str(counts[0])])
        self.mainGridLayout.addWidget(self.canvas, 1, 0, 1, 4)

    def drawPlot(self):
        """Create a fresh matplotlib figure and its Qt canvas (self.canvas)."""
        self.figure = plt.figure()
        self.drawing = self.figure.add_subplot(121)
        self.canvas = matplotlib.backends.backend_qt4agg.FigureCanvasQTAgg(self.figure)
| subhamoykarmakar224/WindowsThreatAttackAnalyzer | ReportLogs.py | ReportLogs.py | py | 2,477 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ReportDBOps.getUniqueReports",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "ReportDBOps.getReportCounts",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot2grid",
"line_number": 56,
"usage_type": "call"
},
... |
7114744218 | import numpy as np
import pandas as pd
from sklearn.preprocessing import Imputer
import keras
from keras.utils import np_utils
# Read the dataset from file.
data = pd.read_csv('spambase.data')
# PART 1 - DATA PREPROCESSING
# Split the loaded data into inputs (57 features) and output label (last column).
input_datas = np.array(data.iloc[:,:57])
output_datas = np.array(data.iloc[:,57])
# Split the data: 80% train, 20% test.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(input_datas, output_datas, test_size = 0.2, random_state = 0)
# Z-score normalisation; fitted on the training split only to avoid leakage.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
# PART 2 - CREATE ANN (see http://keras.io for alternative parameters)
# A Sequential model stacks layers in order: Input Layer - Hidden Layers - Output Layer.
# units: number of nodes in that layer
# activation: function used to squash the layer outputs into a given range
# kernel_initializer: method used to set the initial weights
# input_dim / input_shape: number of inputs in the input layer
#   (i.e. the number of features per data row)
def build_model():
    """Build and compile the binary spam classifier.

    Two ReLU hidden layers of 128 units with 10% dropout after each,
    followed by a single sigmoid output unit; compiled with Adam and
    binary cross-entropy.
    """
    layers = [
        Dense(units=128, activation='relu', kernel_initializer='glorot_uniform', input_dim=57),
        Dropout(rate=0.1),
        Dense(units=128, activation='relu', kernel_initializer='glorot_uniform'),
        Dropout(rate=0.1),
        Dense(units=1, activation='sigmoid', kernel_initializer='glorot_uniform'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model
# Cross-validation (optional)
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
cv_classifier = KerasClassifier(build_fn=build_model,batch_size = 25,nb_epoch = 1000)
# 10-fold cross-validated accuracy on the training split.
accuracies = cross_val_score(estimator = cv_classifier, X = X_train, y = y_train, cv = 10)
# Mean accuracy — replaces the hand-rolled sum/size loop with ndarray.mean().
print(accuracies.mean())
# -- end cross-validation -- #
# Train the model (required).
classifier = build_model()
history_callback = classifier.fit(X_train, y_train, epochs = 100)
acc_history = history_callback.history["acc"]
# Dump per-step accuracy values for plotting (optional — no effect on results).
# NOTE: the values are sorted ascending before writing, so the "Iteration"
# column is a rank, not the training epoch.
acc_history.sort()
rows = ["Iteration,Fitness\n"]
rows.extend("{},{}\n".format(step, val) for step, val in enumerate(acc_history))
# "with" closes the file even on error and avoids shadowing the builtin "file".
with open("acc_history_128.csv", "a") as history_file:
    history_file.write("".join(rows))
#### end of visualisation ###########
# Optional: confusion matrix of correct vs wrong predictions on the test set.
y_pred = classifier.predict(X_test)
# Threshold the sigmoid probabilities at 0.5 to get class labels.
y_pred = y_pred > 0.5
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test,y_pred)
# Loss and accuracy of the trained model on the held-out test data.
loss, accuracy = classifier.evaluate(X_test, y_test)
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_... |
18647131917 | from django.http import HttpResponse
from django.template import RequestContext
from taikoexplorer_db.models import Video, Composer, Song, Group, SongStyle, ComposerSong, VideoSong, VideoGroup
from django.core import serializers
from django.forms.models import model_to_dict
from django.db.models import Count
import youtube, json, sys
# Serves the /add-video-data async requests.
def editVideoData(request):
    """Create/update a video record and attach group, song and composer tags.

    POST fields carry the YouTube metadata plus JSON-encoded tag selections.
    Group tagging returns the serialized groups; song tagging returns the
    song dict augmented with its styles and composers. Any non-POST request
    (or a POST that only created the video) returns "failure".
    """
    print("edit video data")
    if request.method == 'POST':
        vid = request.POST.get("vid")
        vtitle = request.POST.get("vtitle", None)
        vdesc = request.POST.get("vdesc", None)
        dthumb = request.POST.get("dthumb", None)
        mthumb = request.POST.get("mthumb", None)
        cid = request.POST.get("cid", None)
        ctitle = request.POST.get("ctitle", None)
        groupName = request.POST.get("group_name", None)
        songData = request.POST.get("song_title", None)
        composerName = request.POST.get("composer_name", None)
        songStyle = request.POST.get("song_style", None)
        forceCreateSong = json.loads(
            request.POST.get("force_create_song", False)
        )
        video = None
        # add new video if it's not already in the db
        video, created = Video.objects.get_or_create(
            vid=vid,
            title=vtitle,
            description=vdesc,
            channelTitle=ctitle,
            channelID=cid,
            default_thumb_url=dthumb,
            medium_thumb_url=mthumb)
        if created:
            print("new video")
            video.save()
        if groupName is None and songData is None and composerName is None:
            raise Exception("No data was provided")
        # add group; existing groups are referenced by integer pk, new ones
        # by their name string
        if groupName is not None:
            groupName = json.loads(groupName)
            groupArr = []
            for g in groupName :
                group = None
                if type(g['id']) is not int:
                    print("new group")
                    group = Group(name=g['id'])
                    group.save()
                else:
                    group = Group.objects.get(pk=g['id'])
                videoGroup, vg_created = VideoGroup.objects.get_or_create(
                    video=video,
                    group=group
                )
                if vg_created:
                    print("new video group association")
                    videoGroup.save()
                groupArr.append(model_to_dict(group))
            return HttpResponse(
                json.dumps(groupArr), content_type='application/json')
        # add song and composer
        # must have a song title and a song style
        if songData is not None and songStyle is not None:
            songData = json.loads(songData)
            composerName = json.loads(composerName)
            songStyle = json.loads(songStyle)
            song = None
            if type(songData['id']) is not int or forceCreateSong is True:
                # force create is disgusting and expensive as fuck
                # since we query for all the songs first, we should probably store that
                # info somewhere and check it again before assuming the user is a jerk
                if forceCreateSong is True:
                    print("force create")
                    composerIDs = []
                    for c in composerName:
                        composerIDs.append(c['id'])
                    styles = SongStyle.objects.filter(name__in=songStyle)
                    composers = Composer.objects.filter(id__in=composerIDs)
                    # Good lord this query looks like shit...necessary though
                    # http://stackoverflow.com/questions/8618068/django-filter-queryset-in-for-every-item-in-list
                    # If the user forces a create, we need to check if the song they
                    # eventually enter is actually a real song: exact title, exactly
                    # the same set of styles AND exactly the same set of composers.
                    songs = Song.objects.filter(
                        title=songData['text']
                    ).filter(
                        styles__in=styles
                    ).annotate(
                        num_styles=Count('styles')
                    ).filter(
                        num_styles=len(styles)
                    ).filter(
                        composers__in=composers
                    ).annotate(
                        num_composers=Count('composers')
                    ).filter(
                        num_composers=len(composers)
                    )
                    # there should only be one. this assumes that there would never be
                    # more than two songs titled the same thing with the same styles
                    # and the same composers
                    if len(songs) > 0:
                        song = list(songs[:1])[0]
                    else:
                        song = Song(title=songData['text'])
                        print("new song")
                        song.save()
                else:
                    # New song referenced by name string (no pk yet).
                    song = Song(title=songData['id'])
                    print("new song")
                    song.save()
            else:
                song = Song.objects.get(pk=songData['id'])
            songDict = model_to_dict(song)
            # adding styles
            for idx, ss in enumerate(songStyle):
                style = SongStyle.objects.get(name=ss)
                song.styles.add(style)
                if idx == 0:
                    songDict["styles"] = []
                songDict["styles"].append({
                    "fields": model_to_dict(style)
                })
            # adding the composers
            for idx, c in enumerate(composerName) :
                composer = None
                if type(c['id']) is not int:
                    print("new composer")
                    composer = Composer(full_name=c['id'])
                    composer.save()
                else:
                    composer = Composer.objects.get(pk=c['id'])
                cs, cs_created = ComposerSong.objects.get_or_create(
                    composer=composer,
                    song=song
                )
                if cs_created:
                    print("new composer song association")
                    cs.save()
                if idx == 0:
                    songDict["composers"] = []
                songDict["composers"].append({
                    "fields": model_to_dict(composer)
                })
            videoSong, vs_created = VideoSong.objects.get_or_create(
                video=video,
                song=song
            )
            if vs_created:
                print("new video song association")
                videoSong.save()
            sys.stdout.flush()
            return HttpResponse(
                json.dumps(songDict), content_type='application/json')
    # not a post
    return HttpResponse(json.dumps("failure"), content_type='application/json')
# Serves the /delete-video-data async request.
def deleteVideoData(request):
    """Detach a song or group tag from a video.

    Expects POST fields: vid (YouTube id), eid (entity pk) and type
    ("song" or "group"). Responds with JSON "success"; non-POST requests
    get "failure".
    """
    if request.method == 'POST':
        vid = request.POST.get("vid")
        eid = request.POST.get("eid")
        # renamed from "type" to stop shadowing the builtin
        entity_type = request.POST.get("type")
        if entity_type == "song":
            song = Song.objects.get(pk=eid)
            video = Video.objects.get(vid=vid)
            VideoSong.objects.get(video=video, song=song).delete()
        elif entity_type == "group":
            group = Group.objects.get(pk=eid)
            Video.objects.get(vid=vid).groups.remove(group)
        return HttpResponse(json.dumps("success"), content_type='application/json')
    # not a post
    return HttpResponse(json.dumps("failure"), content_type='application/json')
# Takes a video db object and formats it to match the YouTube search-result JSON.
def formattedVideoData(video):
    """Shape a Video record like one item of a YouTube search response."""
    snippet = {
        "title": video.title,
        "description": video.description,
        "channelId": video.channelID,
        "channelTitle": video.channelTitle,
        "thumbnails": {
            "default": {"url": video.default_thumb_url},
            "medium": {"url": video.medium_thumb_url},
        },
    }
    return {
        "id": {"videoId": video.vid},
        "snippet": snippet,
    }
def dbSearchResults(query):
    """Search local Video records by song title, group, composer and style.

    Returns [YouTube-shaped results dict, list of matching Video objects].
    """
    by_song = Video.objects.filter(songs__title__icontains=query)
    by_group = Video.objects.filter(groups__name__icontains=query)
    matching_composers = Composer.objects.filter(
        full_name__icontains=query
    ).prefetch_related('songs__videos')
    matching_styles = SongStyle.objects.filter(
        name__icontains=query
    ).prefetch_related('songs__videos')
    via_composer = [v for c in matching_composers for v in c.videos]
    via_style = [v for s in matching_styles for v in s.videos]
    # Union through a set so a video matching several paths appears once.
    videos = list(
        set(by_song) | set(by_group) | set(via_composer) | set(via_style)
    )
    formatted = [formattedVideoData(v) for v in videos]
    return [{"items": formatted}, videos]
def youtubeSearchResults(getrequest, results = 10):
    """Run a YouTube search for the request's query and pair the raw results
    with the matching local Video records (tags prefetched).

    Returns None when *results* (the default max result count) is not positive.
    """
    if results <= 0:
        return None
    options = {
        "q": getrequest.get("query", None),
        "maxResults": getrequest.get("maxResults", results),
        "pageToken": getrequest.get("pageToken", ""),
    }
    searchResults = youtube.youtube_search(options)
    # Local tag data for every video id the search returned.
    videos = Video.objects.filter(
        vid__in=searchResults.get("vids", None)
    ).prefetch_related(
        'songs__composers', 'groups'
    )
    return [searchResults, videos]
# Iterate through tags and create a map with the vid as the key.
def rekeyAndFormatVideoData(videos):
    """Build a {vid: {videoData, groups, songs}} map for the given videos.

    Each serialized song is augmented with its composers and styles so the
    client receives the full tag tree in one payload.
    """
    dataDict = {}
    for video in videos :
        # Serialize the video's songs into plain dicts.
        songArr = json.loads(
            serializers.serialize(
                "json",
                video.songs.all()
            )
        )
        for idx, song in enumerate(songArr):
            # NOTE(review): video.songs.all() is re-evaluated for every song
            # and field here, which may re-query the database each time —
            # confirm prefetching happens upstream.
            song["fields"]["composers"] = json.loads(
                serializers.serialize(
                    "json",
                    video.songs.all()[idx].composers.all()
                )
            )
            song["fields"]["styles"] = json.loads(
                serializers.serialize(
                    "json",
                    video.songs.all()[idx].styles.all()
                )
            )
        dataDict[video.vid] = {
            "videoData" : model_to_dict(video),
            "groups" : json.loads(
                serializers.serialize("json", video.groups.all())
            ),
            "songs" : songArr,
        }
    return dataDict
#serves the /confirm-video-data async request
def confirmVideoData(request):
    """Mark the Video identified by POST 'entityid' as confirmed.

    Responds with JSON "success" on POST, "failure" for any other method.
    """
    if request.method != 'POST':
        return HttpResponse(json.dumps("failure"), content_type='application/json')
    video = Video.objects.get(pk=request.POST.get("entityid"))
    video.is_confirmed = True
    video.save()
    return HttpResponse(json.dumps("success"), content_type='application/json')
# serves the /yts api
def youtubeSearch(request):
    """Autocomplete-style search over composers, groups, or songs.

    GET params:
        q    -- substring to match (required for any results)
        type -- one of 'composer' | 'group' | 'song' (required)
        vid  -- optional video id; flags entries already tagged in that video

    Returns a JSON list of matching entities, each enriched with its
    serialized videos, composer info, and style names.
    """
    query = request.GET.get("q", None)
    query_type = request.GET.get("type", None)
    vid = request.GET.get("vid", None)
    type_dict = {
        'composer': Composer,
        'group': Group,
        'song': Song
    }
    # Model field searched per type; also reused as the display text key.
    search_fields = {
        'composer': 'full_name',
        'group': 'name',
        'song': 'title'
    }
    data = []
    returnData = []
    if query is not None and query_type is not None:
        model = type_dict[query_type]  # KeyError on unknown type, as before
        # The old per-type try/except blocks were dead code: .filter() never
        # raises DoesNotExist, it just returns an empty queryset.
        data = model.objects.filter(
            **{search_fields[query_type] + '__icontains': query}
        )
    if data:
        # format the data
        for entity in data:
            entry = model_to_dict(entity)
            entry["text"] = entry[search_fields[query_type]]
            entry["videos"] = json.loads(
                serializers.serialize(
                    "json",
                    Video.objects.filter(
                        songs__id=entry["id"]
                    ).all()
                )
            )
            entry["already_tagged"] = False
            if vid is not None:
                # whether or not the song is already tagged in the specified video
                for video in entry["videos"]:
                    if video["fields"]["vid"] == vid:
                        entry["already_tagged"] = True
            # NOTE(review): composers/styles are read off every entity type,
            # not only songs — confirm Composer/Group expose these relations.
            composerDict = json.loads(
                serializers.serialize(
                    "json",
                    entity.composers.all()
                )
            )
            composerInfo = []
            for composer in composerDict:
                composerInfo.append({
                    "text": composer["fields"]["full_name"],
                    "id": composer["pk"]
                })
            entry["composers"] = composerInfo
            styleDict = json.loads(
                serializers.serialize(
                    "json",
                    entity.styles.all()
                )
            )
            styleNames = []
            # We just need the name to set this
            for style in styleDict:
                styleNames.append(style["fields"]["name"])
            entry["styles"] = styleNames
            returnData.append(entry)
    return HttpResponse(
        json.dumps(returnData), content_type='application/json')
| mitchfuku/taikoexplorer | taikoexplorer/data.py | data.py | py | 12,403 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "taikoexplorer_db.models.Video.objects.get_or_create",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "taikoexplorer_db.models.Video.objects",
"line_number": 31,
"usage_type": "... |
9195540013 | import argparse
import pickle
import lmdb
import torch
from tqdm import tqdm
from torch.utils.data import DataLoader
from vq_text_gan.datasets import BPEDataset
from vq_text_gan.utils import get_default_device
def extract_codes(args):
    """Encode every sample of a BPE dataset with a trained VQ model and
    persist the per-sample discrete codes into an LMDB database.

    Keys are the sample index as a UTF-8 string; a final 'length' key
    records the total sample count.
    """
    device = get_default_device(args.device)

    print('Loading model')
    vq_model = torch.load(args.ckpt).to(device)

    print('Loading data')
    dataset = BPEDataset(args.dataset)

    target = args.save_path if args.save_path else 'codes'

    # 100 GB maximum map size for the LMDB environment.
    env = lmdb.open(target, map_size=100 * 1024 * 1024 * 1024)

    loader = DataLoader(dataset, batch_size=args.batch_size)

    with torch.no_grad():
        vq_model.eval()
        with env.begin(write=True) as txn:
            sample_index = 0
            for batch in tqdm(loader):
                batch = batch.to(device)
                codes, _, _ = vq_model.encode(batch)
                # Transpose [per-level codes] into per-sample tuples.
                per_sample = zip(*[code.cpu().numpy() for code in codes])
                for sample_codes in per_sample:
                    txn.put(str(sample_index).encode('utf-8'), pickle.dumps(sample_codes))
                    sample_index += 1
            txn.put('length'.encode('utf-8'), str(sample_index).encode('utf-8'))
if __name__ == '__main__':
    # CLI entry point: parse arguments and run the extraction.
    parser = argparse.ArgumentParser()
    parser.add_argument('--ckpt', type=str, required=True)       # trained model checkpoint path
    parser.add_argument('--dataset', type=str, required=True)    # BPE dataset path
    parser.add_argument('--save-path', type=str)                 # LMDB output dir (defaults to 'codes')
    parser.add_argument('--batch-size', type=int, default=512)
    parser.add_argument('--device', type=str)                    # forwarded to get_default_device
    args = parser.parse_args()
    extract_codes(args)
| kklemon/text-gan-experiments | legacy/vq_text_gan/extract_latents.py | extract_latents.py | py | 1,643 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "vq_text_gan.utils.get_default_device",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "vq_text_gan.datasets.BPEDataset",
"line_number": 20,
"usage_type": "call"
},
{
"a... |
8435375603 | from aiogram import types, Dispatcher
from config import bot, dp, ADMINS
import random
async def game(message: types.Message):
    """Admin-only: reply with a random animated dice emoji."""
    # Only admins whose message text starts with 'game' may play.
    if message.text.startswith('game') and message.from_user.id in ADMINS:
        emoji_options = ('⚽', '🏀', '🎰', '🎳', '🎯', '🎲')
        await bot.send_dice(message.from_user.id, emoji=random.choice(emoji_options))
    else:
        await message.answer("Это функция для тебя не работает")
def register_admin_handlers(dp: Dispatcher):
dp.register_message_handler(game) | kasi170703/bots | handlers/admin.py | admin.py | py | 577 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aiogram.types.Message",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "config.ADMINS",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "random.choice",
... |
24398919509 | import requests
from bs4 import BeautifulSoup
import config
## setup
# NOTE(review): these module-level statements fire an HTTP GET at *import*
# time (without the config.headers used inside scrape()), and the resulting
# globals are shadowed by locals of the same names in scrape(). They look
# like leftover scratch code — confirm nothing else uses these globals
# before removing.
url = 'https://eu4.paradoxwikis.com/Achievements'
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
table = soup.find('table')
def scrape():
    """Scrape the EU4 wiki achievements table into a dict keyed by row index.

    Each value holds: 'dlc' (list of DLC titles, or the string 'None'),
    'achievement', 'description', 'starting', 'requirements', 'version'
    and 'difficulty'.
    """
    table_dict = {}
    headers = config.headers
    url = 'https://eu4.paradoxwikis.com/Achievements'
    page = requests.get(url, headers=headers)
    soup = BeautifulSoup(page.content, 'html.parser')
    table = soup.find('table')
    for i, val in enumerate(table.find_all('tr')):
        ## first table row is the headings of table
        if i == 0:
            continue
        ## Traversing html tags for data and adding to dictionary
        table_dict[i] = {}
        table_dict[i]['dlc'] = []
        td = val.find_all('td')
        div_a = td[0].find_all('div')
        li = td[4].find_all('li')
        div_b = td[4].find_all('a')
        for e in td[4].find_all('a'):
            y = e.attrs
            if len(y) == 0:
                # Plain anchor with no attributes: no DLC requirement.
                # NOTE(review): this replaces the list with the *string*
                # 'None', so consumers must handle both shapes.
                table_dict[i]['dlc'] = 'None'
            elif len(y) == 10 or len(y) == 9:
                # NOTE(review): 9/10-attribute anchors appear to be non-DLC
                # (image) links that are skipped — confirm against the live
                # wiki markup; this is brittle if the page changes.
                pass
            else:
                table_dict[i]['dlc'].append(y['title'])
        table_dict[i]['achievement'] = div_a[2].text.rstrip()
        table_dict[i]['description'] = div_a[3].text.rstrip()
        table_dict[i]['starting'] = td[1].text.rstrip()
        table_dict[i]['requirements'] = td[2].text.rstrip()
        table_dict[i]['version'] = td[5].text.rstrip()
        table_dict[i]['difficulty'] = td[6].text.rstrip()
    return table_dict
| CarsenKennedy/EU4-flask-api | webscraper.py | webscraper.py | py | 1,608 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "config.headers",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"... |
72488882665 | from rest_framework import viewsets, views
from ..models import *
from .serializers import *
from rest_framework.permissions import *
from rest_framework.response import Response
from rest_framework.exceptions import NotFound, ValidationError
from rest_framework import mixins
from .custom_mixins import CreateListModelMixin
from .custom_functions import *
'''
Models - Serializer - ViewSet
Material - MaterialSerializer - MaterialView
Doctor - DoctorSerializer - DoctorView
Clinic - ClinicSerializer - ClinicView
Operation - OperationSerializer - OperationView
Work - WorkSerializer - WorkView
OperationsInWork - OperationsInWorkSerializer
Order - OrderSerializer - OrderView
File - FileSerializer - FileView
Technician - TechnicianSerializer - TechnicianView
WorkInOrders - WorkInOrdersSerializer - WorkInOrdersView
OperationInOrders - OperationsInOrdersSerializer - OperationsInOrdersView
WorksPriceList -
OperationPriceList -
MaterialsOnStock - MaterialsOnStockSerializer - MaterialsOnStockView
MaterialUsedOnOperation - MaterialUsedOnOperationSerializer - MaterialUsedOnOperationView
'''
class DoctorView(viewsets.ModelViewSet):
    """CRUD for the current user's doctors."""
    # Consistency fix: every sibling viewset in this module requires
    # authentication, and get_queryset() dereferences request.user — which
    # fails for anonymous users — so the commented-out permission is enabled.
    permission_classes = [IsAuthenticated]
    serializer_class = DoctorSerializer

    def get_queryset(self):
        """
        doctors only for authenticated owner
        """
        return Doctor.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        # Stamp new rows with the owning user.
        serializer.save(user=self.request.user)
class ClinicView(viewsets.ModelViewSet):
    """CRUD for the current user's clinics."""
    permission_classes = [IsAuthenticated]
    serializer_class = ClinicSerializer

    def get_queryset(self):
        # Scope every query to the authenticated owner.
        return Clinic.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        # Stamp new rows with the owning user.
        serializer.save(user=self.request.user)


class MaterialView(viewsets.ModelViewSet):
    """CRUD for the current user's materials."""
    permission_classes = [IsAuthenticated]
    serializer_class = MaterialSerializer

    def get_queryset(self):
        return Material.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        material = serializer.save(user=self.request.user)
        # Every new material starts with a zero-quantity stock record.
        update_stock(material, 0, self.request.user)


class MaterialOnStockView(viewsets.ModelViewSet):
    """CRUD for the per-user stock records."""
    permission_classes = [IsAuthenticated]
    serializer_class = MaterialOnStockSerializer

    def get_queryset(self):
        return MaterialOnStock.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        serializer.save(user=self.request.user)
class MaterialAddingView(viewsets.ModelViewSet):
    """CRUD for stock-addition records; mutations keep the stock total in sync."""
    permission_classes = [IsAuthenticated]
    serializer_class = MaterialAddingSerializer

    def get_queryset(self):
        return MaterialAdding.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        serializer.save(user=self.request.user)
        data = serializer.validated_data
        # Credit the added amount to the material's stock.
        update_stock(material=data.get('material'), amount=data.get('amount'),
                     user=self.request.user)

    def perform_update(self, serializer):
        serializer.save()
        data = serializer.validated_data
        # NOTE(review): the full new amount is credited without reversing the
        # previously recorded amount — confirm update_stock() handles deltas,
        # otherwise editing a record double-counts stock.
        update_stock(material=data.get('material'), amount=data.get('amount'),
                     user=self.request.user)

    def perform_destroy(self, instance):
        # Reverse the stock credit before removing the record.
        update_stock(instance.material, instance.amount, self.request.user, action='delete')
        instance.delete()
class WorkView(viewsets.ModelViewSet):
    """CRUD for the current user's works."""
    permission_classes = [IsAuthenticated]
    serializer_class = WorkSerializer

    def get_queryset(self):
        return Work.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        serializer.save(user=self.request.user)


class OperationView(viewsets.ModelViewSet):
    """CRUD for the current user's operations."""
    permission_classes = [IsAuthenticated]
    serializer_class = OperationSerializer

    def get_queryset(self):
        return Operation.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        serializer.save(user=self.request.user)


class TechnicianView(viewsets.ModelViewSet):
    """CRUD for the current user's technicians."""
    permission_classes = [IsAuthenticated]
    serializer_class = TechnicianSerializer

    def get_queryset(self):
        return Technician.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        serializer.save(user=self.request.user)


class FileView(viewsets.ModelViewSet):
    """CRUD for the current user's files."""
    permission_classes = [IsAuthenticated]
    serializer_class = FileSerializer

    def get_queryset(self):
        return File.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        serializer.save(user=self.request.user)


class MaterialUsedOnOperationView(CreateListModelMixin, viewsets.ModelViewSet):
    """CRUD (with bulk create) for material-per-operation usage records."""
    permission_classes = [IsAuthenticated]
    serializer_class = MaterialUsedOnOperationSerializer

    def perform_create(self, serializer):
        serializer.save(user=self.request.user)

    def get_queryset(self):
        return MaterialUsedOnOperation.objects.filter(user=self.request.user)
class OrderView(viewsets.ModelViewSet):
    """CRUD for orders; deleting an order first unwinds its payment links."""
    permission_classes = [IsAuthenticated]
    serializer_class = OrderSerializer

    def get_queryset(self):
        return Order.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        # New orders start with a zero balance.
        serializer.save(user=self.request.user, balance=0)

    # todo if is_closed is changed from 1 to 0
    # def perform_update(self, serializer):
    #     if serializer.is_valid(raise_exception=True):
    #         prev_object = serializer
    #         prev_customer = prev_object.clinic if prev_object.clinic else serializer.prev_object.doctor
    #         prev_customer_type = 0 if prev_object.clinic else 1
    #         serializer.save()
    #     order = serializer.save()
    #     data = serializer.validated_data
    #     if data.get('clinic') or data.get('doctor'):
    #         if not prev_customer:
    #             if data.get('clinic'):
    #                 customer = data.get('clinic')
    #             else:
    #                 customer = data.get('doctor')
    #             customer.debt += order.total_price
    #         else:
    #             prev_customer.debt -= order.total_price
    #             if prev_customer_type:
    #                 if data.get('clinic'):
    #                     customer = data.get('clinic')
    #                 else:
    #                     customer = data.get('doctor')
    #                 customer.debt += order.total_price
    #                 return
    #             else:
    #                 if data.get('doctor'):
    #                     return
    #                 else:
    #                     customer = data.get('clinic')
    #                     customer.debt += order.total_price

    def perform_destroy(self, instance):
        # customer = instance.clinic if instance.clinic else instance.doctor
        # customer.debt -= instance.total_price
        # Unwind each linked payment allocation before deleting the order.
        for i in instance.paymentfororder_set.all():
            PaymentForOrderView().perform_destroy(instance=i)
        instance.delete()
class WorkInOrdersView(CreateListModelMixin, viewsets.ModelViewSet):
    """CRUD (with bulk create) for work line-items; every mutation keeps the
    parent order's total in sync via update_sum_of_order()."""
    permission_classes = [IsAuthenticated]
    serializer_class = WorkInOrderSerializer

    def get_queryset(self):
        return WorkInOrders.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        data = serializer.validated_data
        if isinstance(data, list):
            # Bulk create: stamp the owner on each item, save once, then
            # recompute every affected order total.
            for item in data:
                item['user'] = self.request.user
            serializer.save()
            for item in data:
                update_sum_of_order(item.get('order'))
        else:
            data['user'] = self.request.user
            serializer.save()
            update_sum_of_order(data.get('order'))

    def perform_update(self, serializer):
        # If the item moved to a different order, both orders need their
        # totals recomputed.
        order_replaced = self.get_object().order
        serializer.save()
        data = serializer.validated_data
        order = data.get('order') if data.get('order') else order_replaced
        update_sum_of_order(order)
        if order_replaced != order:
            update_sum_of_order(order_replaced)

    def perform_destroy(self, instance):
        # Fix: delete first, THEN recompute — the old code recomputed the
        # order total while the row still existed (leaving a stale total)
        # and left a debug print() behind. PaymentForOrderView already uses
        # this delete-then-recompute order.
        order = instance.order
        instance.delete()
        update_sum_of_order(order)
class OperationsInOrdersView(CreateListModelMixin, viewsets.ModelViewSet):
    """CRUD (with bulk create) for operations attached to orders."""
    permission_classes = [IsAuthenticated]
    serializer_class = OperationInOrderSerializer

    def get_queryset(self):
        return OperationsInOrders.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        serializer.save(user=self.request.user)


class WorksPriceListView(viewsets.ModelViewSet):
    """CRUD for the current user's work price lists."""
    permission_classes = [IsAuthenticated]
    serializer_class = WorksPriceListSerializer

    def get_queryset(self):
        return WorksPriceList.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        serializer.save(user=self.request.user)


class WorkPriceView(CreateListModelMixin, viewsets.ModelViewSet):
    """CRUD (with bulk create) for individual work prices."""
    permission_classes = [IsAuthenticated]
    serializer_class = WorkPriceSerializer

    def get_queryset(self):
        return WorkPrice.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        serializer.save(user=self.request.user)


class OperationsPriceListView(viewsets.ModelViewSet):
    """CRUD for the current user's operation price lists."""
    permission_classes = [IsAuthenticated]
    serializer_class = OperationsPriceListSerializer

    def get_queryset(self):
        return OperationsPriceList.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        serializer.save(user=self.request.user)


class OperationPriceView(CreateListModelMixin, viewsets.ModelViewSet):
    """CRUD (with bulk create) for individual operation prices."""
    permission_classes = [IsAuthenticated]
    serializer_class = OperationPriceSerializer

    def get_queryset(self):
        return OperationPrice.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        serializer.save(user=self.request.user)
class PaymentView(CreateListModelMixin,
                  mixins.CreateModelMixin,
                  mixins.RetrieveModelMixin,
                  mixins.ListModelMixin,
                  mixins.DestroyModelMixin,
                  viewsets.GenericViewSet):
    """Create/read/delete payments; a payment must reference exactly one of
    clinic or doctor, and creation updates that customer's paid total."""
    permission_classes = [IsAuthenticated]
    serializer_class = PaymentSerializer

    # TODO Add Update Method when validation of doctor || clinic will be added
    def get_queryset(self):
        return Payment.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        # Fix: resolve and validate the clinic/doctor exclusivity BEFORE
        # saving — the old code saved first and then raised ValidationError,
        # leaving an orphaned Payment row whenever validation failed.
        # NOTE(review): with CreateListModelMixin, validated_data may be a
        # list for bulk posts; this handler (like the original) only supports
        # the single-object shape — confirm bulk creation is not used here.
        if data.get('clinic') and data.get('doctor'):
            raise ValidationError('Both clinic and doctor cannot be provided')
        elif data.get('clinic'):
            customer = data.get('clinic')
        elif data.get('doctor'):
            customer = data.get('doctor')
        else:
            raise ValidationError('Neither clinic nor doctor were provided')
        serializer.save(user=self.request.user, balance=data.get('amount'))
        update_paid(customer, data.get('amount'))

    def perform_destroy(self, instance):
        # Unwind each order allocation before deleting the payment itself.
        for link in instance.paymentfororder_set.all():
            PaymentForOrderView().perform_destroy(instance=link)
        instance.delete()
class PaymentForOrderView(CreateListModelMixin,
                          mixins.CreateModelMixin,
                          mixins.RetrieveModelMixin,
                          mixins.DestroyModelMixin,
                          mixins.ListModelMixin,
                          viewsets.GenericViewSet):
    """Allocates payment amounts to orders (no update endpoint by design)."""
    permission_classes = [IsAuthenticated]
    serializer_class = PaymentForOrderSerializer

    def get_queryset(self):
        return PaymentForOrder.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        data = serializer.validated_data
        if isinstance(data, list):
            # Bulk create: adjust each order/payment balance (+1 direction)
            # and stamp the owner on every item before the single save().
            for i in range(len(data)):
                update_balance_order_payment(data[i].get('order_instance'), data[i].get('payment_instance'),
                                             data[i].get('amount'), 1)
                data[i]['user'] = self.request.user
        else:
            update_balance_order_payment(data.get('order_instance'), data.get('payment_instance'),
                                         data.get('amount'), 1)
            data['user'] = self.request.user
        serializer.save()

    def perform_destroy(self, instance):
        # Delete first, then reverse the balance adjustment (-1 direction).
        instance.delete()
        update_balance_order_payment(instance.order_instance, instance.payment_instance, instance.amount, -1)
class WorksReportView(views.APIView):
    """
    params:
        doctor - pk, get orders of this doctor (optional)
        clinic - pk, get orders of this clinic (optional)
        start - date %d.%m.%Y, get orders from this date (optional)
        end - date %d.%m.%Y, get order before this date (optional)
        only_not_paid - int, if 1 - shows only not paid works, if 0 - show all works (optional)
    """
    permission_classes = [IsAuthenticated]

    def get(self, request):
        user = request.user
        report = []
        query_params = request.query_params
        clinic_filter, doctor_filter = query_params.get('clinic'), query_params.get('doctor')
        only_not_paid_filter = query_params.get('only_not_paid')
        # Validate the id filters as integers and resolve them to rows.
        if clinic_filter:
            clinic_filter = check_is_int(clinic_filter, 'Clinic')
            clinic = Clinic.objects.filter(user=user).filter(id=clinic_filter).first()
            if not clinic:
                raise NotFound(detail='No such clinic')
        if doctor_filter:
            doctor_filter = check_is_int(doctor_filter, 'Doctor')
            doctor = Doctor.objects.filter(user=user).filter(id=doctor_filter).first()
            if not doctor:
                raise NotFound(detail='No such doctor')
        start_date_filter, end_date_filter = [convert_to_datetime(query_params.get('start')),
                                              convert_to_datetime(query_params.get('end'))]
        if only_not_paid_filter:
            only_not_paid_filter = check_is_int(only_not_paid_filter, 'Only Not Paid')
        # Neither clinic nor doctor given: one report section per clinic,
        # plus one per doctor (clinic-less orders only).
        if not clinic_filter and not doctor_filter:
            clinics_query, doctors_query = Clinic.objects.filter(user=user), Doctor.objects.filter(user=user)
            orders_query = filter_by_date(Order.objects.filter(user=user), start_date_filter, end_date_filter)
            for clinic in clinics_query:
                orders = orders_query.filter(clinic=clinic).order_by('date')
                report.append({'clinic': clinic.clinic_name, 'total': 0, 'patients': []})
                form_report_of_patients(report[-1], orders, only_not_paid_filter)
            for doctor in doctors_query:
                orders = orders_query.filter(doctor=doctor).filter(clinic=None).order_by('date')
                report.append({'doctor': doctor.doctor_name, 'total': 0, 'patients': []})
                form_report_of_patients(report[-1], orders, only_not_paid_filter)
        # Both clinic and doctor given.
        elif doctor_filter and clinic_filter:
            orders_query = filter_by_date(
                Order.objects.filter(user=user).filter(clinic=clinic).filter(doctor=doctor).order_by('date'),
                start_date_filter, end_date_filter)
            report.append({'clinic': clinic.clinic_name, 'doctor': doctor.doctor_name, 'total': 0, 'patients': []})
            form_report_of_patients(report[-1], orders_query, only_not_paid_filter)
        # Only a doctor given.
        elif doctor_filter:
            orders_query = filter_by_date(
                Order.objects.filter(user=user, doctor=doctor).order_by('date'),
                start_date_filter, end_date_filter)
            report.append({'doctor': doctor.doctor_name, 'total': 0, 'patients': []})
            form_report_of_patients(report[-1], orders_query, only_not_paid_filter)
        # Only a clinic given.
        elif clinic_filter:
            orders_query = filter_by_date(
                Order.objects.filter(user=user, clinic=clinic).order_by('date'),
                start_date_filter, end_date_filter)
            report.append({'clinic': clinic.clinic_name, 'total': 0, 'patients': []})
            form_report_of_patients(report[-1], orders_query, only_not_paid_filter)
        # Return the assembled report.
        return Response(report)
class OperationsReportView(views.APIView):
    """Per-technician report of operations.

    Query params: start / end (date range, optional) and technician
    (pk, optional — when omitted, one report section per technician).
    """
    permission_classes = [IsAuthenticated]

    def get(self, request):
        user = request.user
        report = []
        query_params = request.query_params
        start_date_filter, end_date_filter = [convert_to_datetime(query_params.get('start')),
                                              convert_to_datetime(query_params.get('end'))]
        technician_filter = query_params.get('technician')
        if technician_filter:
            technician_filter = check_is_int(technician_filter, 'Technician')
            technician = Technician.objects.filter(user=user).filter(id=technician_filter).first()
            if not technician:
                raise NotFound(detail='No such technician')
            operations_query = filter_by_date(
                OperationsInOrders.objects.filter(user=user).filter(technician=technician).order_by(
                    'date'), start_date_filter, end_date_filter)
            form_report_for_technician(report, technician, operations_query)
        else:
            # No technician specified: build one section per technician.
            operations_query = filter_by_date(OperationsInOrders.objects.filter(user=user).order_by('date'),
                                              start_date_filter,
                                              end_date_filter)
            technicians_query = Technician.objects.filter(user=user)
            for technician in technicians_query:
                form_report_for_technician(report, technician, operations_query.filter(technician=technician))
        return Response(report)
| artgas1/dlab_django | lab/lab_web/views/api.py | api.py | py | 18,206 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "rest_framework.viewsets.ModelViewSet",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "rest_framework.viewsets.ModelViewSet",
"line_number": 45,
"usage_type":... |
42932594486 | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Output, Input
import dash_bootstrap_components as dbc
import pandas as pd
import plotly.express as px
from plotly.graph_objs import *
def run_dash(bales_graph_file_path, pie_chart_file_path, bar_chart_file_path, leader_chart_file_path, leader):
    """Build the communication/leadership competency dashboard.

    Parameters are paths to four pre-computed CSV files plus the 1-based
    player index of the potential leader.  Returns the configured
    dash.Dash app; the caller is responsible for running it.
    """
    df_bales = pd.read_csv(bales_graph_file_path)
    df_word = pd.read_csv(pie_chart_file_path)
    df_pos_neg = pd.read_csv(bar_chart_file_path)
    df_leader = pd.read_csv(leader_chart_file_path)

    leader_title = "Potential leader: Player " + str(leader)
    # NOTE(review): the style text is hard-coded regardless of the data.
    leadership_style = "There is a task-oriented positive leadership style"

    # One palette, defined once — the old code repeated this mapping literal
    # in every figure and duplicated the colors as a bare list.
    player_colors = ['#E06C75', '#98C379', '#E5C07B', '#C678DD', '#56B6C2']
    player_color_map = {'Player %d' % (i + 1): c for i, c in enumerate(player_colors)}
    leader_color = player_colors[leader - 1]

    app = dash.Dash(
        external_stylesheets=[dbc.themes.SUPERHERO]
    )

    # Transparent chart backgrounds so the figures blend with the page.
    layout_set = Layout(
        paper_bgcolor='rgba(0,0,0,0)',
        plot_bgcolor='rgba(0,0,0,0)',
        font=dict(
            family="Verdana",
            color="Black"
        )
    )

    fig_bales = px.line(df_bales, x="Category", y="Amount", color="Player",
                        color_discrete_map=player_color_map,
                        title="Bales Interaction Process Analysis Profiles")
    fig_pie_chart = px.pie(df_word, values="Words", names='Player', hole=.5, color='Player',
                           color_discrete_map=player_color_map,
                           title="Percentage of words said")
    fig_bar_chart = px.bar(df_pos_neg, x="Amount", y="Polarity", color="Player", orientation="h",
                           color_discrete_map=player_color_map,
                           title="Amount of positive and negative statements")
    fig_leader_chart = px.bar(df_leader, x="Category", y="Amount (%)", color='Category',
                              color_discrete_map={'Sentences': leader_color, 'Positive': leader_color,
                                                  'Negative': leader_color},
                              title=leader_title)

    for fig in (fig_bales, fig_pie_chart, fig_bar_chart, fig_leader_chart):
        fig.update_layout(layout_set)
    fig_leader_chart.update_layout(showlegend=False)

    app.layout = html.Div([
        # Header row: partner logos on a white band.
        dbc.Row([
            html.Div([html.Img(
                src='https://upload.wikimedia.org/wikipedia/commons/thumb/f/f4/Logo_marine.svg/1200px-Logo_marine.svg.png',
                style={'margin-left': '70%', 'margin-right': 'auto', 'margin-bottom': '1%'}),
                html.Img(src='https://www.kiemt.nl/wp-content/uploads/2018/06/logos-partners_0000s_0017_UTwente.png',
                         style={'margin-left': 'auto', 'margin-right': '10%', 'margin-bottom': '1%'})],
                style={'background-color': '#ffffff', 'width': '100%', 'height': '90px', 'display': 'flex'}),
        ]),
        # Title banner.
        dbc.Row([
            html.Div(
                dbc.Col(html.H1("Communication and Leadership Competency Dashboard",
                                style={'textAlign': 'left', 'font-family': 'Verdana', 'background-color': '#0E61AA',
                                       'font-size': '30px', 'margin': '5px'}), width=12))
        ], style={'background-color': '#0E61AA', 'padding-left': '1%', 'padding-top': '10px', 'padding-bottom': '10px',
                  'box-shadow': '0px 5px 2px #888888'}),
        # First row of cards: Bales profiles, leader chart, leadership text.
        dbc.Row([
            html.Div(
                dbc.Col(dcc.Graph(
                    id="test",
                    figure=fig_bales)
                ),
                style={'background-color': 'white', 'width': '50%', 'border-radius': '25px', 'margin-left': '3%',
                       'margin-top': '2%', 'margin-bottom': '2%', 'box-shadow': '5px 10px 5px'}),
            html.Div(
                dbc.Col(dbc.Col(dcc.Graph(
                    id="test4",
                    figure=fig_leader_chart)
                )),
                style={'background-color': 'white', 'width': '18.5%', 'border-radius': '25px', 'margin-left': '3%',
                       'margin-top': '2%', 'margin-bottom': '2%', 'box-shadow': '5px 10px 5px'}),
            html.Div(
                dbc.Col([html.H1("What type of leadership has the group leader?",
                                 style={'textAlign': 'center', 'color': 'black', 'margin': '5px', 'margin-top': '20px',
                                        'font-size': '25px'}),
                         html.Hr(style={'border': '1px solid black'}),
                         html.H2(leadership_style,
                                 style={'textAlign': 'center', 'color': 'black', 'margin': '5px', 'margin-top': '20px',
                                        'font-size': '20px'}),
                         html.H3("Source: Butler et al.",
                                 style={'textAlign': 'right', 'color': 'black', 'margin': '5px', 'margin-top': '75%',
                                        'font-size': '13px'})]),
                style={'background-color': 'white', 'width': '18.5%', 'border-radius': '25px', 'margin-left': '3%',
                       'margin-top': '2%', 'margin-bottom': '2%', 'box-shadow': '5px 10px 5px'})
        ]),
        # Second row of cards: word share pie, positive/negative bars.
        dbc.Row([
            html.Div(
                dbc.Col(dcc.Graph(
                    id="test2",
                    figure=fig_pie_chart)
                ), style={'background-color': 'white', 'width': '35%', 'border-radius': '25px', 'margin-left': '3%',
                          'margin-bottom': '2%', 'box-shadow': '5px 10px 5px'}),
            html.Div(
                dbc.Col(dcc.Graph(
                    id="test3",
                    figure=fig_bar_chart)
                ),
                style={'background-color': 'white', 'box-shadow': '5px 10px 5px', 'width': '55%',
                       'border-radius': '25px',
                       'margin-left': '3%', 'margin-bottom': '2%'})
        ])
    ], style={'background-color': '#f3f3f3'})

    return app
| Tijzz/ComptencyAnalysisTool | Dash.py | Dash.py | py | 6,994 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
... |
44007402888 | from django.urls import path
from .views import (
BasicInfoView,
ContactFormView,
FindProviderMedicationView,
GetFormOptionsView,
)
# Version-1 public (unauthenticated) API routes.
public_api_urlpatterns = [
    path(
        'options/',
        GetFormOptionsView.as_view(),
    ),
    path(
        'find_providers/',
        FindProviderMedicationView.as_view(),
    ),
    path(
        'basic_info/',
        BasicInfoView.as_view(),
    ),
    path(
        'contact_form/',
        ContactFormView.as_view(),
    ),
]
| ninjadevtrack/medifiner-api | public/api_urls_v1.py | api_urls_v1.py | py | 521 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "views.GetFormOptionsView.as_view",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "views.GetFormOptionsView",
"line_number": 13,
"usage_type": "name"
},
{
"api_na... |
28145554362 | from flask import Flask, request, jsonify
from urllib.request import urlopen
import json
app = Flask(__name__, static_folder='static')
@app.route('/')
def index():
    """Serve the static single-page front end."""
    return app.send_static_file('index.html')
@app.route('/api/submit', methods=['POST'])
def submit():
    """Echo the posted JSON 'message' back as a success response.

    Bug fix: the old code called input("response") after reading the body,
    which blocked the request handler waiting on server-side stdin and
    discarded the client's message.
    """
    data = request.get_json()
    message = data.get('message')
    response = {'status': 'success', 'message': message}
    return jsonify(response)
@app.route('/api/submit/setword', methods=['POST'])
def setWordOfTheDay():
    """Append the posted JSON 'word'/'definition' pair to the list file."""
    data = request.get_json()
    addWordToList(data.get('word'), data.get('definition'))
    return jsonify({ 'status': 'success' })
@app.route('/api/submit/getword', methods=['POST'])
def getWordOfTheDay():
    """Return the most recently stored word and its definition as JSON."""
    # Body is currently unused, but the call is kept so malformed JSON still
    # fails the same way as before.
    data = request.get_json()
    current = getCurrentWord()
    word = current.get('word')
    definition = current.get('definition')
    # Dead code removed: a dictionaryapi.dev URL was built here but never
    # fetched or returned.
    return jsonify({ 'status': 'success', 'word': word, 'definition': definition })
def addWordToList(word, definition):
    """Append ", word:definition" to the word-of-the-day list file.

    Uses append mode so the whole file no longer has to be read into memory
    and rewritten for every addition; as a side benefit the file is created
    if it does not exist yet (the old read-then-write version raised
    FileNotFoundError in that case).
    """
    with open(r'wordOfTheDayList.txt', 'a') as file:
        file.write(', ' + word + ':' + definition)
def getCurrentWord():
    """Return the most recent "word:definition" entry from the list file.

    Entries are ", "-separated and the newest one is last.  Falls back to
    {'word': 'error', 'definition': 'error'} when the last entry has no
    ':'-separated definition.
    """
    with open(r'wordOfTheDayList.txt', 'r') as file:
        text = file.read()
    try:
        entries = text.split(", ")
        parts = entries[-1].split(":")
        # Like the original, extra ':' segments beyond the second are ignored.
        return {'word': parts[0], 'definition': parts[1]}
    except IndexError:
        # Narrowed from a bare except (which hid real errors and printed an
        # unraised Exception object) to the one failure the parse produces:
        # a last entry without a ':'.
        return {'word': 'error', 'definition': 'error'}
if __name__ == '__main__':
    # 0.0.0.0 exposes the development server on all interfaces.
    app.run(host='0.0.0.0', port=5000)
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"... |
23452187462 | # coding=utf-8
import datetime
from OnlineClassroom.app.ext.plugins import db
from .curriculums import *
from .account import *
from .catalog import *
"""
购买记录
CREATE TABLE `shopping_carts` (
`aid` int DEFAULT NULL COMMENT '外键 用户id',
`cid` int DEFAULT NULL COMMENT '外键 课程id',
`number` int DEFAULT '1' COMMENT '课程数量,但是是网易云课堂类似的,默认就是1买把...',
`price` float(10,2) DEFAULT '0.00' COMMENT '购买时价格',
`create_at` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '创建时间',
KEY `uid` (`aid`),
KEY `cid` (`cid`),
CONSTRAINT `shopping_carts_ibfk_1` FOREIGN KEY (`aid`) REFERENCES `accounts` (`aid`),
CONSTRAINT `shopping_carts_ibfk_2` FOREIGN KEY (`cid`) REFERENCES `curriculums` (`cid`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
class ShoppingCarts(db.Model):
    # Purchase record linking an account (aid) to a curriculum (cid).
    __tablename__ = "shopping_carts"
    aid = db.Column(db.Integer,db.ForeignKey("accounts.aid"),primary_key=True, comment="外键 用户id")
    cid = db.Column(db.Integer,db.ForeignKey("curriculums.cid"),primary_key=True,comment="外键 课程id")
    number = db.Column(db.Integer,default=1)
    price = db.Column(db.Float(10,2),default=0.00)
    # Fix: pass the callable, not its result — datetime.datetime.utcnow()
    # was evaluated once at import time, freezing the default timestamp for
    # every row created through the column default.
    create_at = db.Column(db.DateTime,default=datetime.datetime.utcnow,comment="创建时间")
    _curriculum = db.relationship(Curriculums,backref="shop")
    # user = db.relationship(Account,backref="shops")
    def __repr__(self):
        """Debug representation: table name plus key fields."""
        return "数据库{} {}_{}_{}_{}".format(self.__tablename__,self.cid,self.aid,self.number,self.price)

    def __init__(self,aid=None,cid=None,price=None):
        # number is always 1: a user buys a given course at most once.
        self.aid = aid
        self.cid = cid
        self.number = 1
        self.price = price
        self.create_at = datetime.datetime.now()

    # Check whether the course was purchased or its price is 0.
    def is_record(self):
        """True when the user may access the course: either no purchase row
        exists but the course is free, or a purchase row grants access.

        NOTE(review): implicitly returns None (falsy) when no purchase row
        exists and the course is paid — confirm callers treat that as False.
        """
        shop = self.query.filter_by(aid=self.aid,cid=self.cid).first()
        if shop == None:
            cu = Curriculums.query.filter_by(cid=self.cid).first()
            # Free course: accessible without a purchase record.
            if cu.price <= float(0):
                return True
        else:
            if shop.number != 1:
                # NOTE(review): the relationship on this model is named
                # _curriculum, yet this reads shop.curriculum — confirm a
                # 'curriculum' attribute exists or this raises AttributeError.
                if not shop.curriculum.price <= float(0):
                    return False
                return True
            elif shop.number == 1:
                return True
def get_curriculum__catalog(self):
_catalogs = Catalog.query.filter_by(cat_id=self.cid).all()
items = {}
list_time = []
for catalog in _catalogs:
list_time.append(catalog.serializetion_itme())
items["datas"] = list_time
items["len"] = len(_catalogs)
return items
def is_purchase(self):
shop = self.query.filter(ShoppingCarts.aid==self.aid,ShoppingCarts.cid==self.cid).first()
if shop == None:
return False
return shop.number == 1
    def save(self):
        """Persist this record (number forced to 1, create_at refreshed)."""
        self.number = 1
        self.create_at = datetime.datetime.now()
        return self.is_commit()
    # Commit helper: add + commit, rolling back on any failure.
    def is_commit(self):
        """Return True on successful commit, False after rollback."""
        try:
            db.session.add(self)
            db.session.commit()
            return True
        except Exception as e:
            db.session.rollback()
            return False
    def serialize_item(self):
        """Serialize this purchase row to a plain dict for JSON output."""
        # NOTE(review): self.aid is an Integer column value, so
        # `self.aid.user.nickname` looks wrong -- presumably this should go
        # through a relationship to the Account model; verify against callers.
        item = {
            "aid":self.aid,
            "nickname":self.aid.user.nickname,
            "cid":self.cid,
            "number":self.number,
            "price":self.price,
            "create_at":self.create_at
        }
        return item
def get_purchase_curriculums(self,page=1,number=10):
if page ==None:
page = 1
if number ==None:
number = 10
items = {}
list_item = []
shops = self.query.filter_by(aid=self.aid).paginate(int(page),int(number),False)
for shop in shops.items:
list_item.append(shop._curriculum.serialize_item())
items["datas"] = list_item
items["len"] = len(shops.items)
items["nexts"] = shops.pages
items["total"] = shops.total
return items
    def get_days_shop_effective_sum(self,day):
        """Revenue per day (most recent *day* rows, newest first).

        NOTE(review): *day* is interpolated into the SQL via str.format --
        make sure callers only pass a trusted integer (LIMIT cannot be a
        bound parameter in MySQL, hence the interpolation).
        """
        sql = """
        select 
        date_format(s.create_at,'%Y-%m-%d %H:%i:%S')as dateDay,sum(s.price)as moneyDay
        FROM shopping_carts as s
        group by dateDay
        order by dateDay desc limit 0,{}
        """.format(day)
        results = db.session.execute(sql).fetchall()
        items = sql_result_to_dict(results)
        return items
    def get_month_shop_effective_sum(self,month):
        """Revenue per month (most recent *month* rows, newest first).

        Same str.format caveat as get_days_shop_effective_sum: *month* must
        be a trusted integer.
        """
        sql = """
        select 
        date_format(s.create_at,'%Y-%m')as dateMonth,sum(s.price)as moneyMonth
        FROM shopping_carts as s
        group by dateMonth
        order by dateMonth desc limit 0,{}
        """.format(month)
        results = db.session.execute(sql).fetchall()
        items = sql_result_to_dict(results)
        return items
    def get_monerys(self):
        """Total revenue: first row of {'moneys': sum of all prices}."""
        # Relies on module-level sql_result_to_dict pulled in by the
        # wildcard imports at the top of the file.
        sql = """
        select sum(s.price)as moneys FROM shopping_carts as s
        """
        results = db.session.execute(sql).fetchall()
        items = sql_result_to_dict(results)
return items[0] | z1421012325/flask_online_classroom | app/models/shopping_carts.py | shopping_carts.py | py | 5,125 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "OnlineClassroom.app.ext.plugins.db.Model",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "OnlineClassroom.app.ext.plugins.db",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "OnlineClassroom.app.ext.plugins.db.Column",
"line_number": 2... |
5023782942 | # -*- coding: utf-8 -*-
import os
import shutil
import torch
# from torch.utils.data import *
from torch.utils import data
from imutils import paths
import numpy as np
import random
from PIL import Image
from torchvision.transforms import transforms
import cv2
def cv_imread(path):
    """Read an image via imdecode so non-ASCII paths work (Windows-safe)."""
    raw = np.fromfile(path, dtype=np.uint8)
    return cv2.imdecode(raw, -1)
def cv_imwrite(path,img):
    """Write *img* as JPEG via imencode + tofile (non-ASCII-path safe)."""
    encoded = cv2.imencode('.jpg', img)[1]
    encoded.tofile(path)
def checkfiles(root):
    """Reset *root* to an empty directory, deleting any previous contents."""
    # One mkdir instead of the original duplicated if/else branches.
    if os.path.exists(root):
        shutil.rmtree(root)
    os.mkdir(root)
def ensurefiles(root):
    """Create *root* (and missing parents) if it does not exist yet."""
    # exist_ok avoids the check-then-create race of the original version.
    os.makedirs(root, exist_ok=True)
class crack_loader(data.Dataset):
    """Dataset of (raw image, label mask, ROI mask) triples for crack
    segmentation, read from a CSV-style index file."""
    def __init__(self, img_file, imgSize, PreprocFun=None):
        """*img_file*: text file, one `raw,label,mask` path triple per line.
        *imgSize*: target side length for the label mask.
        *PreprocFun*: optional preprocessing callable; defaults to
        self.transform (resize + normalize to [-1, 1])."""
        with open(img_file,'r',encoding='utf-8') as f:
            lines = f.readlines()
        self.img_paths = [line.strip().split(',') for line in lines]
        self.img_size = imgSize
        if PreprocFun is not None:
            self.PreprocFun = PreprocFun
        else:
            self.PreprocFun = self.transform
    def __len__(self):
        return len(self.img_paths)
    def __getitem__(self, index):
        raw_img_path,label_img_path,mask_img_path = self.img_paths[index]
        raw_img = Image.open(raw_img_path)
        # raw_img = cv_imread(raw_img_path)
        label_img = cv_imread(label_img_path)
        mask_img = cv_imread(mask_img_path)
        # If the label decodes with a channel axis, collapse it to grayscale
        # by averaging.  NOTE(review): bare except -- any other error is
        # silently retried as a 3-channel image; consider narrowing.
        try:
            height1, width1 = label_img.shape
        except:
            label_img = label_img.mean(2)
            height1, width1 = label_img.shape
        if height1 != self.img_size or width1 != self.img_size:
            label_img = cv2.resize(label_img, (self.img_size,self.img_size))
        raw_img = self.PreprocFun(raw_img)
        # label/255 then cast to long -- yields {0,1} only if labels are
        # strictly 0/255; presumably binary masks -- TODO confirm.
        return raw_img, torch.tensor(label_img/255.0,dtype=torch.long), torch.tensor(mask_img,dtype=torch.long)#.unsqueeze(0)#torch.tensor(raw_img,dtype=torch.float32)
    def transform(self, img):
        """Default preprocessing: resize to 512x512, to tensor, normalize
        each channel to roughly [-1, 1]."""
        transform_pre = transforms.Compose(
            [
                transforms.Resize((512,512)),
                # transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
            ]
        )
        img = transform_pre(img)
        return img
if __name__ == "__main__":
train_set = r'./scheme_set/train.txt'
train_loader = crack_loader(train_set,512)
for i in train_loader:
print(i)
| stlyl/crack_detection | crack_detection_python/crack_dataset.py | crack_dataset.py | py | 2,601 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "cv2.imdecode",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cv2.imencode",
"line... |
7582763868 | import urllib.request, urllib.parse, urllib.error
import twurl
import json
import ssl
# https://apps.twitter.com/
# Create App and get the four strings, put them in hidden.py
TWITTER_URL = 'https://api.twitter.com/1.1/friends/list.json'
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
def get_data(acct, info):
    """Print each of *acct*'s first 10 Twitter friends plus one field.

    Args:
        acct: Twitter screen name whose friend list is fetched.
        info: which field to print for every friend; one of
            "followers_count", "friends_count", "date of creation", "id",
            "screen_name", "location" or "url".  Unknown values print only
            the friend's name.

    Returns:
        None (output goes to stdout).
    """
    # Map the human-facing menu entry to the JSON key in the API response;
    # replaces the original seven-branch if/elif chain.
    field_by_choice = {
        "followers_count": "followers_count",
        "friends_count": "friends_count",
        "date of creation": "created_at",
        "id": "id",
        "screen_name": "screen_name",
        "location": "location",
        "url": "url",
    }
    url = twurl.augment(TWITTER_URL,
                        {'screen_name': acct, 'count': '10'})
    connection = urllib.request.urlopen(url, context=ctx)
    data = connection.read().decode()
    js = json.loads(data)
    for user in js['users']:
        print(user['name'])
        key = field_by_choice.get(info)
        if key is not None:
            print(user[key])
if __name__ == "__main__":
acct = input('Enter Twitter Account:')
print(" ")
print(["id", "location", "url", "followers_count", "date of creation", "screen_name"])
info = input('Enter where you want to get:')
print(get_data(acct, info))
| soliashuptar/LAB3 | twitter2.py | twitter2.py | py | 1,574 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ssl.create_default_context",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ssl.CERT_NONE",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "twurl.augment",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "urllib.re... |
27819397243 | import networkx as nx
import numpy as np
from params import args
class JobDAG(object):
    """A directed acyclic graph of computation nodes forming one job."""

    def __init__(self, nodes, adj_mat, name):
        # nodes: list of N nodes
        # adj_mat: N by N 0-1 adjacency matrix, e_ij = 1 -> edge from i to j
        assert len(nodes) == adj_mat.shape[0]
        assert adj_mat.shape[0] == adj_mat.shape[1]

        self.name = name
        self.nodes = nodes
        self.adj_mat = adj_mat

        self.num_nodes = len(self.nodes)
        self.num_nodes_done = 0

        # set of executors currently running on the job
        self.executors = set()

        # the computation graph needs to be a DAG
        assert is_dag(self.num_nodes, self.adj_mat)

        # frontier = nodes that are currently schedulable
        self.frontier_nodes = set()
        for node in self.nodes:
            if node.is_schedulable():
                self.frontier_nodes.add(node)

        # back-reference this dag from every node
        self.assign_job_dag_to_node()

        # dag is arrived
        self.arrived = False
        # dag is completed
        self.completed = False
        # dag start time
        self.start_time = None
        # dag completion time
        self.completion_time = np.inf

        # map an executor number to an interval
        self.executor_interval_map = self.get_executor_interval_map()

    def assign_job_dag_to_node(self):
        """Point every node back at this DAG."""
        for node in self.nodes:
            node.job_dag = self

    def get_executor_interval_map(self):
        """Map every executor count 0..exec_cap to the pair of neighbouring
        entries of args.executor_data_point that bracket it."""
        executor_interval_map = {}
        # left of the first data point: clamp to the first point
        for e in range(args.executor_data_point[0] + 1):
            executor_interval_map[e] = \
                (args.executor_data_point[0],
                 args.executor_data_point[0])
        # between consecutive data points: bracket by the two neighbours
        for i in range(len(args.executor_data_point) - 1):
            for e in range(args.executor_data_point[i] + 1,
                           args.executor_data_point[i + 1]):
                executor_interval_map[e] = \
                    (args.executor_data_point[i],
                     args.executor_data_point[i + 1])
            # exactly at a data point
            e = args.executor_data_point[i + 1]
            executor_interval_map[e] = \
                (args.executor_data_point[i + 1],
                 args.executor_data_point[i + 1])
        # right of the last data point: clamp to the last point
        if args.exec_cap > args.executor_data_point[-1]:
            for e in range(args.executor_data_point[-1] + 1,
                           args.exec_cap + 1):
                executor_interval_map[e] = \
                    (args.executor_data_point[-1],
                     args.executor_data_point[-1])
        return executor_interval_map

    def reset(self):
        """Return the DAG to its initial (un-arrived, un-scheduled) state."""
        for node in self.nodes:
            node.reset()
        self.num_nodes_done = 0
        self.executors = set()
        self.frontier_nodes = set()
        for node in self.nodes:
            if node.is_schedulable():
                self.frontier_nodes.add(node)
        self.arrived = False
        self.completed = False
        self.completion_time = np.inf

    def update_frontier_nodes(self, node):
        """Add newly schedulable children of *node* to the frontier.

        Returns True only when the frontier actually changed.
        """
        frontier_nodes_changed = False
        for child in node.child_nodes:
            if child.is_schedulable():
                # BUGFIX: frontier_nodes stores node objects, but the old
                # code tested `child.idx not in self.frontier_nodes` (an int
                # against a set of nodes), which is always true and therefore
                # mis-reported a change on every call.
                if child not in self.frontier_nodes:
                    self.frontier_nodes.add(child)
                    frontier_nodes_changed = True
        return frontier_nodes_changed
def is_dag(num_nodes, adj_mat):
    """True iff the 0-1 adjacency matrix describes an acyclic digraph."""
    graph = nx.DiGraph()
    graph.add_nodes_from(range(num_nodes))
    graph.add_edges_from(
        (src, dst)
        for src in range(num_nodes)
        for dst in range(num_nodes)
        if adj_mat[src, dst] == 1
    )
    return nx.is_directed_acyclic_graph(graph)
| SpeedSchedulerProject/MDPA | spark_env/job_dag.py | job_dag.py | py | 3,725 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.inf",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "params.args.executor_data_point",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "params.args",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "param... |
20371505372 | import tensorflow as tf
gpus = tf.config.list_physical_devices('GPU')
if gpus:
tf.config.set_logical_device_configuration(gpus[0], [tf.config.LogicalDeviceConfiguration(memory_limit=5292)])
import keras
import matplotlib.pyplot as plt
import numpy as np
from keras import layers, optimizers, losses, metrics, Model
from PIL import Image
from sklearn import preprocessing as pre
# Construct the discriminator: 256x256x1 image -> sigmoid real/fake score.
def create_D():
    input_img = keras.Input(shape=(256,256, 1))
    # Strided 2x2 conv halves the spatial size; dropout regularizes.
    conv1 = layers.Conv2D(8, (2,2), (2,2), padding='valid',
                          activation=layers.ReLU())(input_img)
    drop1 = layers.Dropout(rate=0.5)(conv1)
    down1 = layers.MaxPool2D(pool_size=(2,2), strides=(2,2), padding="valid")(drop1)
    flat1 = layers.Flatten()(down1)
    score = layers.Dense(1, activation='sigmoid')(flat1)
    D: Model = Model(inputs=input_img, outputs=score)
    D.compile(optimizer=optimizers.Nadam(learning_rate=0.002), loss=losses.BinaryCrossentropy(), metrics=metrics.MeanAbsoluteError())
    return D
# Construct the generator: 100-d latent vector -> 256x256x1 image in [-1, 1].
def create_G():
    random_input = keras.Input(shape=(100, ))
    Dense1 = layers.Dense(units = 16*16)(random_input)
    B_Norm1 = layers.BatchNormalization()(Dense1)
    Relu1 = layers.LeakyReLU()(B_Norm1)
    reshape1 = layers.Reshape(target_shape=(16, 16, 1))(Relu1)
    # Two transposed convolutions expand the channel dimension.
    DeConv1 = layers.Conv2DTranspose(filters=4, kernel_size=(2, 2), strides=(1,1), padding='same')(reshape1)
    B_Norm2 = layers.BatchNormalization()(DeConv1)
    Relu2 = layers.LeakyReLU()(B_Norm2)
    DeConv2 = layers.Conv2DTranspose(filters=16, kernel_size=(3,3), strides=(1,1), padding='same')(Relu2)
    # 3-D pooling over a (16,16,16,1) view compresses the feature volume.
    re = layers.Reshape((16,16,16,1))(DeConv2)
    maxi0 = layers.MaxPool3D(pool_size=(4,4,2))(re)
    B_Norm3 = layers.BatchNormalization()(maxi0)
    Relu3 = layers.LeakyReLU()(B_Norm3)
    flat0 = layers.Flatten()(Relu3)
    # tanh keeps pixel values in [-1, 1], matching the preprocessed input.
    dense00 = layers.Dense(units=256*256, activation='tanh')(flat0)
    output_img = layers.Reshape((256, 256, 1))(dense00)
    G = Model(inputs=random_input, outputs=output_img)
    return G
# Join the discriminator and generator, forming the final GAN model.
def create_GAN():
    D = create_D()
    G = create_G()
    latent_input = keras.Input(shape=(100, ))
    img = G(latent_input)
    # Freeze D inside the combined model so GAN updates train only G.
    D.trainable = False
    score = D(img)
    GAN = Model(inputs = latent_input, outputs=score)
    GAN.compile(loss=losses.BinaryCrossentropy(),
            optimizer=optimizers.Nadam(learning_rate=0.002), metrics=metrics.MeanAbsoluteError())
    return D, G, GAN
# Sample a batch of latent points from the standard normal distribution.
def getLatentSamples(num_samples) -> np.ndarray:
    """Return ``int(num_samples)`` latent vectors, each of length 100."""
    count = int(num_samples)
    flat = np.random.normal(size=count * 100)
    return np.reshape(flat, (count, 100))
# Build a batch of "real" samples: batch_size copies of the (single)
# training image, each labelled 1.  (The old comment here was copy-pasted
# from getFakeSamples and described the wrong function.)
def getRealSamples(batch_size: int, train_img: np.ndarray):
    """Return (X_real, y_real): *batch_size* float32 copies of *train_img*
    stacked on a new batch axis, and int8 labels of all ones."""
    # np.repeat along a new leading axis replaces the original
    # tolist()*batch_size round-trip through Python lists -- same result,
    # no per-pixel object churn.
    X_real = np.repeat(train_img[np.newaxis, ...], batch_size, axis=0).astype(np.float32)
    y_real = np.ones(shape=batch_size, dtype=np.int8)
    assert X_real.shape == (batch_size, train_img.shape[0], train_img.shape[1])
    assert y_real.shape == (batch_size, )
    return X_real, y_real
# Feed latent noise through the generator and return the resulting
# "fake" image batch, labelled 0.
def getFakeSamples(G:Model, batch_size:int):
    """Run *G* on fresh latent samples; return (images, zero labels)."""
    noise = getLatentSamples(batch_size)
    assert noise.shape == (batch_size, 100)
    generated = np.asarray(G(noise), dtype=np.float32)
    assert generated.shape[0] == batch_size
    generated = np.squeeze(generated)
    labels = np.zeros(shape=batch_size, dtype=np.int8)
    assert labels.shape == (batch_size, )
    return generated, labels
# Save one generated image for *epoch* (the old comment claimed 16 images,
# but a single latent sample is drawn and saved).
def pred_plot(G: Model, epoch:int, train_img_shape: tuple[int, int]):
    shape = train_img_shape
    latent = getLatentSamples(1)
    gen_image = G(latent)
    gen_image = np.asarray(gen_image, dtype=np.float32)
    gen_image = np.squeeze(gen_image)
    assert gen_image.shape == shape
    # NOTE(review): assumes an existing ./images directory -- imsave does
    # not create it.
    filename = str('images/gen3-%d.png' %epoch)
    plt.imsave(filename, gen_image, cmap='gray')
    # Every 100th epoch, also pop up the saved image for inspection.
    if epoch % 100 == 0:
        im=Image.open(filename)
        im.load()
        im.convert('L').show()
# Function plots the networks' loss values with respect to epoch.
def loss_plot(dLossIn, gLossIn):
    # NOTE(review): the figure is built but neither shown, saved nor
    # returned; callers must grab plt.gcf() (or this looks unused).
    fig, ax = plt.subplots(figsize=(12, 8))
    ax.plot(dLossIn, label='Discriminator Loss')
    ax.plot(gLossIn, label='GAN (generator) Loss')
    ax.legend()
    ax.set_xlim(xmin=-5, xmax=305)
    ax.set_ylim(ymin=-0.2, ymax=1.2)
    plt.xlabel('Epoch')
    plt.ylabel('Loss value')
    plt.title("Loss value vs Epoch for Generator and Discriminator")
# Function trains the GAN (generator) model.
def trainGAN(D:Model, G:Model, GAN:Model, num_epochs: int, batchSize: int, train_img: np.ndarray, train_img_shape: tuple[int, int]):
    """Alternate discriminator and generator updates for *num_epochs*
    batches; checkpoints models and a sample image every 50 epochs and
    finally plots the loss/MAE curves."""
    d_loss, g_loss = [], []
    d_mae, g_mae = [], []
    pred_plot(G, 0, train_img_shape)
    for epoch in range(1, num_epochs+1):
        # Half real / half fake batch for the discriminator step.
        X_real, y_real = getRealSamples(int(batchSize/2), train_img)
        X_fake, y_fake = getFakeSamples(G, int(batchSize / 2))
        assert X_real.shape == X_fake.shape
        assert X_real.shape[0] == int(batchSize / 2)
        assert y_real.shape == y_fake.shape
        assert y_real.shape[0] == int(batchSize / 2)
        D.trainable = True
        d_loss_fake, mae_fake = D.train_on_batch(X_fake, y_fake)
        d_loss_real, mae_real = D.train_on_batch(X_real, y_real)
        D.trainable = False
        # Generator step: label latent samples as "real" (1) to push G
        # toward fooling the frozen discriminator.
        X_latent, y_latent = getLatentSamples(batchSize), np.ones(shape=batchSize, dtype=np.int8)
        loss, mae = GAN.train_on_batch(X_latent, y_latent)
        d_loss_res = float((d_loss_fake + d_loss_real) / 2)
        d_mae_res = float((mae_fake + mae_real) / 2)
        d_loss.append(d_loss_res)
        d_mae.append(d_mae_res)
        g_loss.append(loss)
        g_mae.append(mae)
        # Checkpoint every 50 epochs (range starts at 1, so the epoch != 0
        # clause is redundant but harmless).
        if (epoch % 50 == 0) and epoch != 0:
            pred_plot(G, epoch, train_img_shape)
            models = (D, G, GAN)
            filepaths = ('D_final.h5', 'G_final.h5', 'GAN_final.h5')
            for model, name in zip(models, filepaths):
                model.save(name)
            print('Epoch: %d\nDiscriminator Loss: %.3f, Generator Loss: %.3f\nDiscriminator MAE: %.3f, Generator MAE: %.3f' % (epoch, d_loss[-1], g_loss[-1], d_mae[-1], g_mae[-1]))
    # 2x2 panel: discriminator/generator loss and MAE over epochs.
    fig, ax = plt.subplots(2,2, figsize=(11,11))
    ax[0][0].plot(list(range(len(d_loss))), d_loss, label='D_loss', color='red', linestyle='dashed', linewidth=1)
    ax[0][1].plot(list(range(len(g_loss))), g_loss, label='G_loss', color='red', linestyle='solid', linewidth=1)
    ax[1][0].plot(list(range(len(d_mae))), d_mae, label='D_mae', color='blue', linestyle='dashed', linewidth=1)
    ax[1][1].plot(list(range(len(g_mae))), g_mae, label='G_mae', color='blue', linestyle='solid', linewidth=1)
    fig.legend()
    fig.tight_layout()
    plt.savefig("loss_plots")
    plt.show()
if __name__ == '__main__':
    # Load the single training image and convert it to grayscale.
    img = Image.open("orig_img_color.png")
    img.load()
    img = img.convert(mode='RGB').convert(mode='L')
    img = np.asarray(img, dtype=np.float32)
    # NOTE(review): bare except on an assert -- any non-(256,256) image is
    # force-reshaped, which only works if it has exactly 256*256 pixels.
    try:
        assert img.shape == (256, 256)
    except:
        img = np.reshape(a=img, newshape=(256, 256))
    # Scale pixels to [-1, 1] to match the generator's tanh output range.
    training_img = pre.minmax_scale(img.flatten(), (-1,1))
    training_img = training_img.reshape(img.shape)
    D, G, GAN = create_GAN()
    def show_summary(D, G, GAN):
        # Print keras model summaries for all three networks.
        D.summary()
        G.summary()
        GAN.summary()
    verbose = input('Enter T/F for verbose output: ')
    if verbose == 'T':
        show_summary(D, G, GAN)
    trainGAN(D, G, GAN, 10000, 16, training_img, training_img.shape)
| Logann120/Logann120.github.io | img_gen/python_code/img_gen.py | img_gen.py | py | 7,935 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.config.list_physical_devices",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "tensorflow.config",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.config.set_logical_device_configuration",
"line_number": 4,
"usag... |
24916190530 | #encoding:utf-8
import os
import requests
class MyRequests():
    """Thin convenience wrapper around ``requests.get``."""
    def get_url(self, url, headers):
        """Fetch *url* with *headers*; delegates to self.request."""
        return self.request(url, headers)
    def request(self, url, headers):
        """Perform the actual GET and return the response object."""
        return requests.get(url, headers=headers)
def main():
    """Fetch https://www.baidu.com with browser-like headers and dump the
    response's type, status, text, cookies and raw content to stdout."""
    url = "https://www.baidu.com"
    headers = {
        'Accept': 'text/html, application/xhtml+xml, image/jxr, */*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN',
        'Connection': 'Keep-Alive',
        'Cookie': 'BD_CK_SAM=1; BD_HOME=1; BD_UPN=1d314753; ispeed_lsm=2; delPer=0; H_PS_645EC=d5bbSBejmXxPM1%2BsCmVEyoFLm39JwsP3AbAlAoSW0b0%2BplUfviS57%2FLdlE4b6KwhNKa176%2BUEcGq; H_PS_PSSID=; BDRCVFR[feWj1Vr5u3D]=I67x6TjHwwYf0; PSINO=5; BDRCVFR[4r8LXJfwh-6]=I67x6TjHwwYf0; BAIDUID=79468E69F21EBFAC3378FA10DD014840:FG=1; BIDUPSID=79468E69F21EBFAC3378FA10DD014840; PSTM=1529030279; MCITY=-%3A; BDUSS=DRWYnNvNHh2ZnJJdEdFT1BQTndGa3pnYUF4ZUhoLUwwaUVYZU5JNlJvcHZERlJiQVFBQUFBJCQAAAAAAAAAAAEAAACZ9sEfcGVuZ2ZhbjEyMzc4MwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAG9~LFtvfyxbd; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598',
        'Host': 'www.baidu.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063'
    }
    client = MyRequests()
    response = client.get_url(url, headers=headers)
    print("retype:%s\n" % (type(response)))
    print("status_code:%s\n" % (response.status_code))
    print("texttype:%s\n" % (type(response.text)))
    print("text:%s\n" % (response.text))
    print("cookies:%s\n" % (response.cookies))
    print("content:%s\n" % (response.content))
    print("decode:%s\n" % (response.content.decode("utf-8")))
if ("__main__"==__name__):
main() | fanpengcs/python | my_requests.py | my_requests.py | py | 1,750 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
}
] |
36901679046 | import logging
from math import sqrt
from typing import Optional
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from tqdm import trange
from ..common import HistoricalData, get_device
from ..exceptions import SimulationException
from . import Simulations
logger = logging.getLogger("pyfmc.simulations.gbm")
class Trajectory:
    """Holds simulated price paths (as NumPy) and offers a line-plot helper."""
    def __init__(self, dist: torch.Tensor, label: str = "Trajectory") -> None:
        # Convert to NumPy once so downstream plotting is torch-free.
        self.dist = dist.numpy()
        self.label = label
    def value(self):
        """Return the underlying NumPy array."""
        return self.dist
    def plot(self, title=None, xlabel=None, ylabel=None):
        """Draw the trajectories as a seaborn line plot; returns (fig, ax)."""
        figure, axis = plt.subplots()
        sns.lineplot(data=self.dist, legend=False)
        axis.set_xlabel(xlabel or "time")
        axis.set_ylabel(ylabel or self.label)
        axis.set_title(title or self.label)
        return figure, axis
class Distribution:
    """Holds a 1-D sample distribution (as NumPy) with histogram/KDE plots."""
    def __init__(self, dist: torch.Tensor, label: str = "Distribution"):
        # Convert to NumPy once; later access never needs torch.
        self.dist = dist.numpy()
        self.label = label
    def value(self):
        """Return the underlying NumPy array."""
        return self.dist
    def plot(self, bins=10, kde=False, title=None, xlabel=None, ylabel=None):
        """Plot the distribution (histogram, or KDE when *kde* is True);
        returns (fig, ax)."""
        figure, axis = plt.subplots()
        if kde:
            sns.kdeplot(data=self.dist, color="blue", fill=True, ax=axis)
        else:
            sns.histplot(self.dist, bins=bins, ax=axis)
        axis.set_xlabel(xlabel or self.label)
        axis.set_ylabel(ylabel or ("Density" if kde else "Counts"))
        axis.set_title(title or self.label)
        return figure, axis
    def __str__(self) -> str:
        return str(self.dist)
class GBMResult:
    """Bundles the initial/final price distributions (and optional
    trajectories) produced by a GBM simulation."""
    def __init__(
        self, init_dist: torch.Tensor, final_dist: torch.Tensor, trajectories: Optional[torch.Tensor] = None
    ) -> None:
        # Move everything to CPU so plotting/NumPy conversion always works.
        self.init_dist = init_dist.cpu()
        self.final_dist = final_dist.cpu()
        self._trajectories = trajectories.cpu() if trajectories is not None else trajectories
    def price_distribution(self):
        """Final simulated price distribution."""
        return Distribution(self.final_dist, label="Price Distribution")
    def trajectories(self):
        """Simulated price paths, or None (with a warning) when the
        simulation was run without recording trajectories."""
        if self._trajectories is None:
            logger.warning("No trajectories")
            return
        return Trajectory(self._trajectories)
    def return_distribution(self):
        """(final - initial) / initial return distribution; trims the
        initial distribution when non-finite walkers were dropped."""
        if self.final_dist.size() != self.init_dist.size():
            logger.warning("Size mismatch due to MPS mode, fixing ...")
            init_dist = self.init_dist[: self.final_dist.size(0)]
        else:
            init_dist = self.init_dist
        return Distribution((self.final_dist - init_dist) / init_dist, label="Return Distribution")
    def VaR(self, alpha: float):
        """Value-at-risk: the *alpha*-th percentile of the returns."""
        return np.percentile(self.return_distribution().value(), alpha)
class GBM(Simulations):
    """Geometric-Brownian-motion Monte-Carlo simulation over a price series."""

    def __init__(
        self,
        df: pd.DataFrame,
        n_walkers: int,
        n_steps: int,
        n_trajectories: int = 0,
        step_size: float = 1,
        open_index: str = "Open",
        close_index: str = "Close",
        device_acc: bool = False,
    ) -> None:
        """Configure the simulation.

        Raises:
            SimulationException: if *open_index* or *close_index* is not a
                column of *df*.
        """
        super().__init__(n_walkers, n_steps, device_acc)
        self.df = df
        self.step_size = step_size
        self.open_index = open_index
        self.close_index = close_index
        self.n_trajectories = n_trajectories
        # BUGFIX: the original test `(open_index and close_index) not in
        # df.columns` only checked the last truthy operand (close_index);
        # validate both column names independently.
        if open_index not in df.columns or close_index not in df.columns:
            raise SimulationException("Wrong open_index or close_index")

    def simulate(self):
        """Run the walkers and return a GBMResult with the initial/final
        price distributions (and trajectories if requested)."""
        hist_data = HistoricalData(self.df, self.open_index, self.close_index)
        device = get_device() if self.device_acc else torch.device("cpu")
        logger.info("Using %s for calculation ...", device)
        # MPS has no float64 support, hence the dtype switch.
        dtype = torch.float32 if device == torch.device("mps") else torch.float64
        logger.info("Using device: %s", device)
        exp_return = torch.tensor(hist_data.return_mean, device=device, dtype=dtype)
        std_return = torch.tensor(hist_data.return_std, device=device, dtype=dtype)
        last_price = hist_data.get_latest_close_price()
        # All walkers start from the latest close price.
        s0 = torch.tensor([last_price] * self.n_walkers, device=device, dtype=dtype)
        init_dist = torch.clone(s0)
        trajectories = init_dist[: self.n_trajectories] if self.n_trajectories > 0 else None
        s1 = torch.zeros(self.n_walkers, device=device, dtype=dtype)
        ds = torch.zeros(self.n_walkers, device=device, dtype=dtype)
        dt = torch.tensor(self.step_size)
        for _ in trange(self.n_steps):
            # dS = S * (mu*dt + sigma*sqrt(dt)*eps), eps ~ N(0, 1)
            epsilon = torch.randn(self.n_walkers, device=device, dtype=dtype)
            shock = (std_return * sqrt(dt)) * epsilon
            drift = dt * exp_return
            ds = (shock + drift) * s0
            s1 = s0 + ds
            s0 = torch.clone(s1)
            if self.n_trajectories > 0:
                trajectories = torch.concat((trajectories, s0[: self.n_trajectories]))
        # Drop walkers that blew up to inf/nan before reporting.
        s0 = s0[torch.isfinite(s0)]
        return GBMResult(
            init_dist,
            s0,
            trajectories.reshape([self.n_steps + 1, self.n_trajectories]) if self.n_trajectories > 0 else None,
        )
| ethanlee928/pyfmc | pyfmc/simulations/gbm.py | gbm.py | py | 5,015 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplo... |
41550884221 | import math
import warnings
from typing import Sequence
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchmetrics.functional import pairwise_cosine_similarity
from mmcv.cnn import (build_activation_layer, build_conv_layer,
build_norm_layer, xavier_init)
from mmcv.cnn.bricks.registry import (TRANSFORMER_LAYER,
TRANSFORMER_LAYER_SEQUENCE)
from mmcv.cnn.bricks.transformer import (BaseTransformerLayer,
TransformerLayerSequence,
build_transformer_layer_sequence)
from torch.nn.init import normal_
from .builder import ROTATED_TRANSFORMER
from mmdet.models.utils import Transformer
from mmdet.models.utils.transformer import inverse_sigmoid
from mmcv.ops import diff_iou_rotated_2d
from fast_pytorch_kmeans import KMeans, MultiKMeans
# from mmrotate.core import obb2poly, poly2obb
# # from mmrotate.core import obb2xyxy
# from mmdet.core import bbox_cxcywh_to_xyxy
try:
from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention
except ImportError:
warnings.warn(
'`MultiScaleDeformableAttention` in MMCV has been moved to '
'`mmcv.ops.multi_scale_deform_attn`, please update your MMCV')
from mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention
torch.autograd.set_detect_anomaly(True)
kmeans = KMeans(n_clusters=15, mode='cosine',verbose=1)
def obb2poly_tr(rboxes):
    """Convert oriented bounding boxes to corner polygons.

    Args:
        rboxes (torch.Tensor): boxes in ``[x_ctr, y_ctr, w, h, angle]``
            format, shape ``(..., 5)``.

    Returns:
        torch.Tensor: polygons ``[x0, y0, x1, y1, x2, y2, x3, y3]``,
        shape ``(..., 8)``.
    """
    x = rboxes[..., 0]
    y = rboxes[..., 1]
    w = rboxes[..., 2]
    h = rboxes[..., 3]
    a = rboxes[..., 4]
    cosa = torch.cos(a)
    sina = torch.sin(a)
    # Half-extent vectors along the box's rotated width/height directions.
    wx, wy = w / 2 * cosa, w / 2 * sina
    hx, hy = -h / 2 * sina, h / 2 * cosa
    p1x, p1y = x - wx - hx, y - wy - hy
    p2x, p2y = x + wx - hx, y + wy - hy
    p3x, p3y = x + wx + hx, y + wy + hy
    p4x, p4y = x - wx + hx, y - wy + hy
    # GENERALIZED: stack on the last axis instead of a hard-coded dim=2;
    # identical for the original (batch, num_boxes, 5) input and now also
    # correct for unbatched (num_boxes, 5) input.
    return torch.stack([p1x, p1y, p2x, p2y, p3x, p3y, p4x, p4y], dim=-1)
def bbox_cxcywh_to_xyxy_tr(bbox):
    """Convert boxes from (cx, cy, w, h) to their four axis-aligned corner
    points (top-left, top-right, bottom-left, bottom-right).

    Note: despite the name, the output is NOT a 4-tuple (x1, y1, x2, y2);
    each box becomes 8 values ``[x1,y1, x2,y1, x1,y2, x2,y2]``.

    Args:
        bbox (Tensor): Shape (n, 4) for bboxes.

    Returns:
        Tensor: Converted corner points, shape (n, 8).
    """
    cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1)
    bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy - 0.5 * h),
                (cx - 0.5 * w), (cy + 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)]
    return torch.cat(bbox_new, dim=-1)
@ROTATED_TRANSFORMER.register_module()
class RotatedDeformableDetrTransformer(Transformer):
"""Implements the DeformableDETR transformer.
Args:
as_two_stage (bool): Generate query from encoder features.
Default: False.
num_feature_levels (int): Number of feature maps from FPN:
Default: 4.
two_stage_num_proposals (int): Number of proposals when set
`as_two_stage` as True. Default: 300.
"""
    def __init__(self,
                 as_two_stage=False,
                 num_feature_levels=5,
                 two_stage_num_proposals=300,
                 mixed_selection = True,
                 **kwargs):
        """See the class docstring for the argument meanings;
        *mixed_selection* keeps learned content queries while taking
        positional queries from encoder proposals."""
        super(RotatedDeformableDetrTransformer, self).__init__(**kwargs)
        self.as_two_stage = as_two_stage
        self.num_feature_levels = num_feature_levels
        self.two_stage_num_proposals = two_stage_num_proposals
        self.mixed_selection = mixed_selection
        # embed_dims is taken from the encoder built by the base Transformer.
        self.embed_dims = self.encoder.embed_dims
        self.init_layers()
    def init_layers(self):
        """Initialize layers of the DeformableDetrTransformer."""
        # Learnable per-FPN-level embedding added to the flattened features.
        self.level_embeds = nn.Parameter(
            torch.Tensor(self.num_feature_levels, self.embed_dims))
        if self.as_two_stage:
            # Projections of encoder memory / proposal geometry into the
            # embedding space (8 = polygon coords, 5 = obb parameters).
            self.enc_output = nn.Linear(self.embed_dims, self.embed_dims)
            self.enc_output_cls = nn.Linear(8, 256)
            self.enc_output_reg = nn.Linear(5, 256)
            self.enc_output_pro = nn.Linear(5, 256)
            self.enc_output_cls_norm = nn.LayerNorm(256)
            self.enc_output_reg_norm = nn.LayerNorm(256)
            self.enc_output_pro_norm = nn.LayerNorm(256)
            self.enc_output_norm = nn.LayerNorm(self.embed_dims)
            self.pos_trans = nn.Linear(self.embed_dims*2,
                                       self.embed_dims*2)
            self.pos_trans_xy = nn.Linear(self.embed_dims,
                                       self.embed_dims)
            self.pos_trans_whacls = nn.Linear(self.embed_dims,
                                       self.embed_dims)
            self.pos_trans_norm = nn.LayerNorm(self.embed_dims*2)
            self.pos_trans_xy_norm = nn.LayerNorm(self.embed_dims)
            self.pos_trans_whacls_norm = nn.LayerNorm(self.embed_dims)
            # Learned content queries for the decoder (mixed selection).
            self.tgt_embed = nn.Embedding(self.two_stage_num_proposals, self.embed_dims)
            self.two_stage_wh_embedding = nn.Embedding(1, 2)
            # self.two_stage_wh1_embedding = nn.Embedding(1,512)
            # self.two_stage_wh2_embedding = nn.Embedding(1,2048)
            # self.two_stage_wh3_embedding = nn.Embedding(1,8192)
            # self.two_stage_wh4_embedding = nn.Embedding(1,32768)
            # Per-location angle embeddings; 256/1024/4096/16384 = 16^2,
            # 32^2, 64^2, 128^2 -- presumably one per feature-map cell of
            # the four levels (see spatial_shapes note below) -- confirm.
            self.two_stage_theta1_embedding = nn.Embedding(1,256)
            self.two_stage_theta2_embedding = nn.Embedding(1,1024)
            self.two_stage_theta3_embedding = nn.Embedding(1,4096)
            self.two_stage_theta4_embedding = nn.Embedding(1,16384)
            nn.init.normal_(self.tgt_embed.weight.data)
        else:
            # Single-stage variant predicts reference points directly.
            self.reference_points = nn.Linear(self.embed_dims, 2)
    def init_weights(self):
        """Initialize the transformer weights."""
        # Xavier for all matrix-shaped parameters.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
        # Deformable attention modules have their own initializer.
        for m in self.modules():
            if isinstance(m, MultiScaleDeformableAttention):
                m.init_weights()
        if not self.as_two_stage:
            xavier_init(self.reference_points, distribution='uniform', bias=0.)
        normal_(self.level_embeds)
        # NOTE(review): two_stage_wh_embedding is created only when
        # as_two_stage is True (see init_layers), yet it is accessed here
        # unconditionally -- this raises AttributeError for the
        # single-stage configuration; confirm intended usage.
        # log(0.05/0.95): inverse-sigmoid of an initial 0.05 proposal size.
        nn.init.constant_(self.two_stage_wh_embedding.weight, math.log(0.05 / (1 - 0.05)))
        # nn.init.uniform_(self.two_stage_wh1_embedding.weight, a=math.log(0.05 / (1 - 0.05)),b=math.log(0.08 / (1 - 0.08)))
        # nn.init.uniform_(self.two_stage_wh2_embedding.weight, a=math.log(0.05 / (1 - 0.05)),b=math.log(0.08 / (1 - 0.08)))
        # nn.init.uniform_(self.two_stage_wh3_embedding.weight, a=math.log(0.04 / (1 - 0.04)),b=math.log(0.05 / (1 - 0.05)))
        # nn.init.uniform_(self.two_stage_wh4_embedding.weight, a=math.log(0.04 / (1 - 0.04)),b=math.log(0.05 / (1 - 0.05)))
        # nn.init.uniform_(self.two_stage_wh1_embedding.weight, a=0.36,b=0.44)
        # nn.init.uniform_(self.two_stage_wh2_embedding.weight, a=0.17,b=0.23)
        # nn.init.uniform_(self.two_stage_wh3_embedding.weight, a=0.08,b=0.12)
        # nn.init.uniform_(self.two_stage_wh4_embedding.weight, a=0.03,b=0.07)
        # Angle embeddings start uniform over [0, pi/2).
        nn.init.uniform_(self.two_stage_theta1_embedding.weight, a=0.0,b=(np.pi/2))
        nn.init.uniform_(self.two_stage_theta2_embedding.weight, a=0.0,b=(np.pi/2))
        nn.init.uniform_(self.two_stage_theta3_embedding.weight, a=0.0,b=(np.pi/2))
        nn.init.uniform_(self.two_stage_theta4_embedding.weight, a=0.0,b=(np.pi/2))
# def gen_encoder_output_proposals(self, memory, memory_padding_mask,
# spatial_shapes, inputwh=None, learnedwh1=None, learnedwh2=None, learnedwh3=None,
# learnedwh4=None, learnedtheta1=None ,learnedtheta2=None,
# learnedtheta3=None, learnedtheta4=None):
def gen_encoder_output_proposals(self, memory, memory_padding_mask,
                                 spatial_shapes, inputwh=None, learnedtheta1=None, learnedtheta2=None,
                                 learnedtheta3=None, learnedtheta4=None):
    """Generate rotated-box grid proposals from encoded memory.

    Args:
        memory (Tensor): The output of the encoder, shape
            (bs, num_key, embed_dim); num_key equals the number of
            points on the feature maps of all levels.
        memory_padding_mask (Tensor): Padding mask for memory,
            shape (bs, num_key).
        spatial_shapes (Tensor): The shape of all feature maps,
            shape (num_level, 2).
        inputwh (Tensor, optional): Learned (w, h) logits shared by
            every grid location; sigmoid is applied before use.
            NOTE(review): if this is None, ``wh`` below is never
            assigned and the torch.cat raises NameError — callers must
            pass it.
        learnedtheta1..learnedtheta4 (Tensor, optional): Unused in the
            active code path; kept from per-level learned-angle
            experiments (see the dropped commented-out code).

    Returns:
        tuple: A tuple of feature map and box prediction.

            - output_memory (Tensor): The input of the decoder, shape
              (bs, num_key, embed_dim).
            - output_proposals (Tensor): Grid proposals, shape
              (bs, num_key, 5) laid out as (cx, cy, w, h, theta); the
              first four components are passed through an inverse
              sigmoid, theta is left in linear space (zeros here).
    """
    N, S, C = memory.shape
    proposals = []
    _cur = 0
    # Build one grid of proposals per level (e.g. 16x16 ... 128x128),
    # one proposal centered on every feature-map cell.
    for lvl, (H, W) in enumerate(spatial_shapes):
        mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H * W)].view(
            N, H, W, 1)
        # Number of non-padded rows / columns per image.
        valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
        valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
        grid_y, grid_x = torch.meshgrid(
            torch.linspace(
                0, H - 1, H, dtype=torch.float32, device=memory.device),
            torch.linspace(
                0, W - 1, W, dtype=torch.float32, device=memory.device))
        grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
        scale = torch.cat([valid_W.unsqueeze(-1),
                           valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2)
        # Cell centers, normalized by the valid region of each image.
        grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale
        # (Dropped experiments: per-level learned wh / theta embeddings
        # reshaped onto each grid via the learnedtheta* parameters, and
        # the fixed 0.05 prior of vanilla Deformable DETR.)
        if inputwh is not None:
            # Shared learned (w, h) prior, doubled at each level — same
            # progression the fixed 0.05 prior used.
            wh = torch.ones_like(grid) * inputwh.sigmoid() * (2.0 ** lvl)
        # NOTE(review): zeros_like of the *mask* tensor, so theta
        # inherits the mask's dtype (likely bool) — confirm the
        # torch.cat below accepts the mix of dtypes.
        angle = torch.zeros_like(mask_flatten_)
        # (cx, cy, w, h, theta) per cell -> (N, H*W, 5).
        proposal = torch.cat((grid, wh, angle), -1).view(N, -1, 5)
        proposals.append(proposal)
        _cur += (H * W)
    output_proposals = torch.cat(proposals, 1)
    # A proposal is valid when all of (cx, cy, w, h) lie strictly
    # inside (0.01, 0.99); theta is not part of the validity test
    # (a theta-aware variant was tried and dropped).
    output_proposals_valid = ((output_proposals[..., :4] > 0.01) &
                              (output_proposals[..., :4] < 0.99)
                              ).all(
                                  -1, keepdim=True)
    # Inverse sigmoid on the box part only; theta stays linear.
    output_proposals[..., :4] = torch.log(output_proposals[..., :4].clone() / (1 - output_proposals[..., :4].clone()))
    # Invalidate padded / out-of-range locations with a huge logit
    # (10000) instead of inf, applied to the box part only.
    output_proposals[..., :4] = output_proposals[..., :4].masked_fill(
        memory_padding_mask.unsqueeze(-1), 10000)
    output_proposals[..., :4] = output_proposals[..., :4].masked_fill(
        ~output_proposals_valid, 10000)
    output_memory = memory
    # Zero out features at padded / invalid locations before the
    # output projection + norm.
    output_memory = output_memory.masked_fill(
        memory_padding_mask.unsqueeze(-1), float(0))
    output_memory = output_memory.masked_fill(~output_proposals_valid,
                                              float(0))
    output_memory = self.enc_output_norm(self.enc_output(output_memory))
    return output_memory, output_proposals
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device):
"""Get the reference points used in decoder.
Args:
spatial_shapes (Tensor): The shape of all
feature maps, has shape (num_level, 2).
valid_ratios (Tensor): The radios of valid
points on the feature map, has shape
(bs, num_levels, 2)
device (obj:`device`): The device where
reference_points should be.
Returns:
Tensor: reference points used in decoder, has \
shape (bs, num_keys, num_levels, 2).
"""
reference_points_list = []
for lvl, (H, W) in enumerate(spatial_shapes):
# TODO check this 0.5
ref_y, ref_x = torch.meshgrid(
torch.linspace(
0.5, H - 0.5, H, dtype=torch.float32, device=device),
torch.linspace(
0.5, W - 0.5, W, dtype=torch.float32, device=device))
ref_y = ref_y.reshape(-1)[None] / (
valid_ratios[:, None, lvl, 1] * H)
ref_x = ref_x.reshape(-1)[None] / (
valid_ratios[:, None, lvl, 0] * W)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
def get_valid_ratio(self, mask):
    """Get the valid (non-padded) ratio of a single-level feature map.

    Args:
        mask (Tensor): Padding mask, shape (bs, H, W); True marks
            padded positions.

    Returns:
        Tensor: Per-image (width_ratio, height_ratio), shape (bs, 2).
    """
    _, height, width = mask.shape
    # Valid rows are counted along the first column, valid columns
    # along the first row.
    rows_valid = (~mask[:, :, 0]).sum(1)
    cols_valid = (~mask[:, 0, :]).sum(1)
    ratio_h = rows_valid.float() / height
    ratio_w = cols_valid.float() / width
    return torch.stack([ratio_w, ratio_h], -1)
##### TODO: the position encoding below should also take theta (rotation angle) into account
def get_proposal_pos_embed(self,
                           proposals,
                           num_pos_feats=128,
                           temperature=10000):
    """Sine-cosine positional embedding of 4-dim proposals.

    NOTE: writes the sigmoid-scaled values of the first four channels
    back into the caller's `proposals` tensor (in-place). Returns an
    embedding of shape (N, L, 4 * num_pos_feats).
    """
    full_circle = 2 * math.pi
    freq = torch.arange(
        num_pos_feats, dtype=torch.float32, device=proposals.device)
    freq = temperature ** (2 * (freq // 2) / num_pos_feats)
    # In-place: map box logits to (0, 2*pi) before embedding.
    proposals[:, :, :4] = proposals[:, :, :4].sigmoid() * full_circle
    # (N, L, 4, num_pos_feats)
    angles = proposals[:, :, :, None] / freq
    sin_part = angles[:, :, :, 0::2].sin()
    cos_part = angles[:, :, :, 1::2].cos()
    # Interleave sin/cos pairs, then flatten the channel dims.
    return torch.stack((sin_part, cos_part), dim=4).flatten(2)
def get_proposal_pos_xy_embed(self,
                              proposals,
                              num_pos_feats=128,
                              temperature=10000):
    """Sine-cosine positional embedding of the (x, y) part of proposals.

    NOTE: writes the sigmoid-scaled values of the first two channels
    back into the caller's `proposals` tensor (in-place). Returns an
    embedding of shape (N, L, 2 * num_pos_feats).
    """
    full_circle = 2 * math.pi
    freq = torch.arange(
        num_pos_feats, dtype=torch.float32, device=proposals.device)
    freq = temperature ** (2 * (freq // 2) / num_pos_feats)
    # In-place: map (x, y) logits to (0, 2*pi) before embedding.
    proposals[:, :, :2] = proposals[:, :, :2].sigmoid() * full_circle
    # (N, L, 2, num_pos_feats)
    angles = proposals[:, :, :, None] / freq
    sin_part = angles[:, :, :, 0::2].sin()
    cos_part = angles[:, :, :, 1::2].cos()
    return torch.stack((sin_part, cos_part), dim=4).flatten(2)
def get_proposal_pos_wha_embed(self,
                               proposals,
                               num_pos_feats=64,
                               temperature=10000):
    """Sine-cosine embedding for the (w, h, angle) part of proposals.

    NOTE: the in-place write targets channels 2:5 of the input — on
    the 3-channel (w, h, angle) slice callers pass, that is only the
    last channel (the angle); the first two channels are embedded
    un-sigmoided. Returns shape (N, L, 3 * num_pos_feats).
    """
    full_circle = 2 * math.pi
    freq = torch.arange(
        num_pos_feats, dtype=torch.float32, device=proposals.device)
    freq = temperature ** (2 * (freq // 2) / num_pos_feats)
    # In-place: map channels 2:5 to (0, 2*pi) before embedding.
    proposals[:, :, 2:5] = proposals[:, :, 2:5].sigmoid() * full_circle
    # (N, L, 3, num_pos_feats)
    angles = proposals[:, :, :, None] / freq
    sin_part = angles[:, :, :, 0::2].sin()
    cos_part = angles[:, :, :, 1::2].cos()
    return torch.stack((sin_part, cos_part), dim=4).flatten(2)
def get_proposal_pos_cls_embed(self,
                               proposal_cls,
                               num_pos_feats=64,
                               temperature=10000):
    """Sine-cosine embedding of a per-proposal class score.

    NOTE: writes the sigmoid-scaled score back into the caller's
    `proposal_cls` tensor (in-place). Returns an embedding of shape
    (N, num_q, num_pos_feats).
    """
    full_circle = 2 * math.pi
    freq = torch.arange(
        num_pos_feats, dtype=torch.float32, device=proposal_cls.device)
    freq = temperature ** (2 * (freq // 2) / num_pos_feats)
    # In-place: map the score logit to (0, 2*pi) before embedding.
    proposal_cls[:, :, :1] = proposal_cls[:, :, :1].sigmoid() * full_circle
    # (N, L, 1, num_pos_feats)
    angles = proposal_cls[:, :, :, None] / freq
    sin_part = angles[:, :, :, 0::2].sin()
    cos_part = angles[:, :, :, 1::2].cos()
    return torch.stack((sin_part, cos_part), dim=4).flatten(2)
# Handles only the query *position* embedding; the query content embedding is produced separately.
# def get_proposal_pos_embed(self,
# proposals,
# num_pos_feats=64,
# temperature=10000):
# """Get the position embedding of proposal."""
# scale = 2 * math.pi
# dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=proposals.device)
# dim_t = 10000 ** (2 * (dim_t // 2) / num_pos_feats)
# x_embed = proposals[:, :, 0] * scale
# y_embed = proposals[:, :, 1] * scale
# pos_x = x_embed[:, :, None] / dim_t
# pos_y = y_embed[:, :, None] / dim_t
# pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)
# pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)
# if proposals.size(-1) == 2:
# pos = torch.cat((pos_y, pos_x), dim=2)
# elif proposals.size(-1) == 4:
# w_embed = proposals[:, :, 2] * scale
# pos_w = w_embed[:, :, None] / dim_t
# pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2)
# h_embed = proposals[:, :, 3] * scale
# pos_h = h_embed[:, :, None] / dim_t
# pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2)
# pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)
# else:
# raise ValueError("Unknown pos_tensor shape(-1):{}".format(proposals.size(-1)))
# return pos
def forward(self,
            mlvl_feats,
            mlvl_masks,
            query_embed,
            mlvl_pos_embeds,
            bbox_coder=None,
            reg_branches=None,
            cls_branches=None,
            first_stage=False,
            **kwargs):
    """Forward function for `Transformer`.

    Args:
        mlvl_feats (list(Tensor)): Input queries from different levels.
            Each element has shape [bs, embed_dims, h, w].
        mlvl_masks (list(Tensor)): The key_padding_mask from different
            levels used for encoder and decoder, each element has shape
            [bs, h, w].
        query_embed (Tensor): The query embedding for the decoder,
            with shape [num_query, c].
        mlvl_pos_embeds (list(Tensor)): The positional encoding of
            feats from different levels, shape [bs, embed_dims, h, w].
        bbox_coder (optional): Box coder forwarded unchanged to the
            decoder.
        reg_branches (obj:`nn.ModuleList`): Regression heads for
            feature maps from each decoder layer. Only passed when
            `with_box_refine` is True. Default to None.
        cls_branches (obj:`nn.ModuleList`): Classification heads for
            feature maps from each decoder layer. Only passed when
            `as_two_stage` is True. Default to None.
        first_stage (bool): Two-stage only — when True, return the raw
            encoder box predictions `enc_outputs_coord_unact_angle`
            early, before any proposal filtering or decoding.
            Default to False.

    Returns:
        tuple[Tensor]: results of decoder containing the following tensors.

            - inter_states: Outputs from decoder. If
              return_intermediate_dec is True output has shape
              (num_dec_layers, bs, num_query, embed_dims), else
              (1, bs, num_query, embed_dims).
            - init_reference_out: The initial reference points,
              shape (bs, num_queries, 4) — only (cx, cy, w, h); theta
              is dropped before decoding.
            - inter_references_out: The intermediate reference points
              from the decoder.
            - enc_outputs_class: Classification scores of the encoder
              proposals, shape (batch, h*w, num_classes). Only
              meaningful when `as_two_stage` is True, otherwise None.
            - enc_outputs_coord_unact_angle: Encoder box regressions
              in unactivated (logit) space, shape (batch, h*w, 5) as
              (cx, cy, w, h, theta). Only meaningful when
              `as_two_stage` is True, otherwise None.

        When both `as_two_stage` and `first_stage` are True, only
        `enc_outputs_coord_unact_angle` is returned.
    """
    # assert self.as_two_stage or query_embed is not None
    feat_flatten = []
    mask_flatten = []
    lvl_pos_embed_flatten = []
    spatial_shapes = []
    # Flatten every feature level to sequence form and record (h, w).
    for lvl, (feat, mask, pos_embed) in enumerate(
            zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)):
        bs, c, h, w = feat.shape
        spatial_shape = (h, w)
        spatial_shapes.append(spatial_shape)
        # [bs, h*w, c]
        feat = feat.flatten(2).transpose(1, 2)
        # [bs, h*w]
        mask = mask.flatten(1)
        # [bs, h*w, c]
        pos_embed = pos_embed.flatten(2).transpose(1, 2)
        # Add a learned per-level embedding so levels stay distinguishable.
        lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1)
        lvl_pos_embed_flatten.append(lvl_pos_embed)
        feat_flatten.append(feat)
        mask_flatten.append(mask)
    feat_flatten = torch.cat(feat_flatten, 1)
    mask_flatten = torch.cat(mask_flatten, 1)
    lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
    spatial_shapes = torch.as_tensor(
        spatial_shapes, dtype=torch.long, device=feat_flatten.device)
    # Start offset of each level inside the flattened sequence.
    level_start_index = torch.cat((spatial_shapes.new_zeros(
        (1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
    valid_ratios = torch.stack(
        [self.get_valid_ratio(m) for m in mlvl_masks], 1)
    # multi-scale reference points
    reference_points = \
        self.get_reference_points(spatial_shapes,
                                  valid_ratios,
                                  device=feat.device)
    feat_flatten = feat_flatten.permute(1, 0, 2)  # (H*W, bs, embed_dims)
    lvl_pos_embed_flatten = lvl_pos_embed_flatten.permute(
        1, 0, 2)  # (H*W, bs, embed_dims)
    # e.g. 21760 = 128*128 + 64*64 + 32*32 + 16*16 query positions;
    # `memory` holds the encoded 256-d feature for each of them.
    memory = self.encoder(
        query=feat_flatten,
        key=None,
        value=None,
        query_pos=lvl_pos_embed_flatten,
        query_key_padding_mask=mask_flatten,
        spatial_shapes=spatial_shapes,
        reference_points=reference_points,
        level_start_index=level_start_index,
        valid_ratios=valid_ratios,
        **kwargs)
    memory = memory.permute(1, 0, 2)
    bs, _, c = memory.shape
    if self.as_two_stage:
        # Shared learned (w, h) logit prior used to seed every grid proposal.
        input_hw = self.two_stage_wh_embedding.weight[0]
        # (Dropped experiments: per-level learned wh / theta embeddings
        # fed into gen_encoder_output_proposals.)
        output_memory, output_proposals = \
            self.gen_encoder_output_proposals(
                memory, mask_flatten, spatial_shapes, input_hw)
        # Per-proposal class scores and box refinements from the extra
        # (index num_layers) encoder-stage heads.
        enc_outputs_class = cls_branches[self.decoder.num_layers](
            output_memory)
        # NOTE(review): misspelled and unused below; kept as-is.
        cls_wieght = cls_branches[self.decoder.num_layers].weight
        enc_outputs_coord_unact_angle = \
            reg_branches[
                self.decoder.num_layers](output_memory) + output_proposals
        if first_stage:
            return enc_outputs_coord_unact_angle
        # (Dropped experiments: fixed per-level top-k splits of the
        # flattened proposal sequence, and k-means clustering of the
        # encoder memory for proposal filtering.)
        # Dynamic first-stage top-k: count proposals whose best class
        # confidence clears 0.3 / 0.2 / 0.1 and clamp the count into
        # [filter_1, filter_4]. NOTE(review): count_nonzero is taken
        # over the whole batch, not per image.
        enc_out_topk_mask = enc_outputs_class.max(dim=2)[0].clone().detach()
        enc_out_topk_mask = enc_out_topk_mask.sigmoid()
        enc_m = nn.Threshold(0.3, 0)
        enc_m1 = nn.Threshold(0.2, 0)
        enc_m2 = nn.Threshold(0.1, 0)
        enc_out_topk_mask1 = enc_m(enc_out_topk_mask)
        enc_out_topk_mask2 = enc_m1(enc_out_topk_mask)
        enc_out_topk_mask3 = enc_m2(enc_out_topk_mask)
        enc_m_topk1 = torch.count_nonzero(enc_out_topk_mask1)
        enc_m_topk2 = torch.count_nonzero(enc_out_topk_mask2)
        enc_m_topk3 = torch.count_nonzero(enc_out_topk_mask3)
        # Hand-tuned bounds for the "900 proposals" config (alternative
        # tuned sets for 100/200/300/1200/1500 proposals were removed).
        filter_1 = 1200
        filter_2 = 1800
        filter_3 = 2000
        filter_4 = 3000
        if enc_m_topk3 >= filter_1:
            if enc_m_topk2 >= filter_1:
                if filter_1 <= enc_m_topk1 <= filter_2:
                    topk_1 = int(enc_m_topk1)
                elif enc_m_topk1 > filter_2:
                    topk_1 = filter_2
                elif (enc_m_topk1 < filter_1) & (enc_m_topk2 <= filter_3):
                    topk_1 = int(enc_m_topk2)
                elif (enc_m_topk1 < filter_1) & (enc_m_topk2 > filter_3):
                    topk_1 = filter_3
            elif (enc_m_topk2 < filter_1) & (enc_m_topk3 <= filter_4):
                topk_1 = int(enc_m_topk3)
            elif (enc_m_topk2 < filter_1) & (enc_m_topk3 > filter_4):
                topk_1 = filter_4
        else:
            topk_1 = filter_1
        topk_2 = self.two_stage_num_proposals
        bs, fm_num, cls_num = enc_outputs_class.shape
        enc_ouputs_class_topk1 = enc_outputs_class.clone().detach()
        enc_outputs_class_sigmoid_topk1 = enc_ouputs_class_topk1.sigmoid()
        # First filter: indices of the topk_1 proposals ranked by best
        # class logit, shape (bs, topk_1).
        topk_test = torch.topk(enc_ouputs_class_topk1.max(dim=2)[0], topk_1, dim=1)[1]
        # Detached copies for the auxiliary (IoU / similarity) scores.
        enc_outputs_coord_unact_angle_sig = enc_outputs_coord_unact_angle.clone().detach()
        output_proposals_sig = output_proposals.clone().detach()
        enc_outputs_coord_unact_angle_sim = enc_outputs_coord_unact_angle.clone().detach()
        output_proposals_sim = output_proposals.clone().detach()
        # NOTE(review): sigmoid is applied to all five dims here,
        # including theta (a box-only variant is commented out upstream).
        enc_outputs_coord_unact_angle_sig = enc_outputs_coord_unact_angle_sig.sigmoid()
        output_proposals_sig = output_proposals_sig.sigmoid()
        # Rotated IoU between refined boxes and their seed proposals,
        # shape (bs, num_keys).
        ious = diff_iou_rotated_2d(enc_outputs_coord_unact_angle_sig, output_proposals_sig)
        # IoU restricted to the pre-filtered set, shape (bs, topk_1).
        top_ious = torch.gather(
            ious, 1,
            topk_test.repeat(1, 1))
        # NOTE(review): top_ious (and the *_sim clones above) are
        # computed but not used in the final ranking below.
        # (Dropped experiment: arccos-normalized cosine similarity
        # between regression / proposal embeddings as a ranking term.)
        # Sigmoid class scores of the filtered set; the trailing 8 is
        # the hard-coded number of classes — TODO confirm vs the head.
        topk_confidence = torch.gather(
            enc_outputs_class_sigmoid_topk1, 1,
            topk_test.unsqueeze(-1).repeat(1, 1, 8))
        topk_confidence = topk_confidence.detach()
        # Encoder features of the filtered proposals, (bs, topk_1, 256).
        topk_memory = torch.gather(
            output_memory, 1,
            topk_test.unsqueeze(-1).repeat(1, 1, 256))
        topk_memory = topk_memory.detach()
        # Per-image max pairwise cosine similarity among the filtered
        # proposals' features; added to the confidence before the
        # final top-k selection.
        for i in range(bs):
            cos_sim = pairwise_cosine_similarity(topk_memory[i])
            top_cos_sim = cos_sim.max(dim=1)[0].unsqueeze(0)
            if i == 0:
                top_cos_totoal_sim = top_cos_sim
            else:
                top_cos_totoal_sim = torch.cat([top_cos_totoal_sim, top_cos_sim], dim=0)
        # Final selection: best-class confidence + similarity bonus.
        # (Dropped variants weighting in top_sim / top_ious.)
        topk_confidence_ind = torch.topk(
            topk_confidence.max(dim=2)[0] + top_cos_totoal_sim, topk_2, dim=1)[1]
        # Map back to indices into the full proposal sequence, (bs, topk_2).
        topk_proposals = torch.gather(topk_test, 1, topk_confidence_ind.repeat(1, 1))
        # (Dropped experiments: plain top-k on class scores, and a long
        # per-class quota-based top-k selection over all 15 classes.)
        # Selected 5-dim (cx, cy, w, h, theta) boxes, detached anchors.
        topk_coords_unact = torch.gather(
            enc_outputs_coord_unact_angle, 1,
            topk_proposals.unsqueeze(-1).repeat(1, 1, 5))
        topk_coords_unact = topk_coords_unact.detach()
        topk_cls_score = torch.gather(
            enc_outputs_class, 1,
            topk_proposals.unsqueeze(-1).repeat(1, 1, 8))
        # NOTE(review): unused in the active code path below.
        topk_cls_score = topk_cls_score.detach()
        # Decoder reference points use only (cx, cy, w, h); theta is
        # dropped here (a 5-dim variant is commented out upstream).
        reference_points = topk_coords_unact[..., :4].sigmoid()
        init_reference_out = reference_points
        # Positional encoding of the 4-dim anchors -> content/position.
        # NOTE(review): get_proposal_pos_embed writes sigmoid-scaled
        # values back into topk_coords_unact through this slice view.
        pos_trans_out = self.pos_trans_norm(
            self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact[..., :4])))
        # query_embed here is the content embed for deformable DETR
        if not self.mixed_selection:
            query_pos, query = torch.split(pos_trans_out, c, dim=2)
        else:
            # Mixed selection: learned content queries combined with
            # encoder-derived position queries.
            query = query_embed.unsqueeze(0).expand(bs, -1, -1)
            query_pos, _ = torch.split(pos_trans_out, c, dim=2)
        # (Dropped experiments: separate xy / wh-angle-class positional
        # encodings, adding a class-score embedding to the query, and
        # cluster-based query initialization.)
    else:
        # Single-stage: split the learned query embedding into content
        # and position halves; initial reference points are predicted
        # from the position half.
        query_pos, query = torch.split(query_embed, c, dim=1)
        query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1)
        query = query.unsqueeze(0).expand(bs, -1, -1)
        reference_points = self.reference_points(query_pos).sigmoid()
        init_reference_out = reference_points
    # decoder
    # (Dropped experiment: vector-quantizing top-k features with a
    # class-sized codebook and adding them to the query.)
    query = query.permute(1, 0, 2)
    memory = memory.permute(1, 0, 2)
    query_pos = query_pos.permute(1, 0, 2)
    inter_states, inter_references = self.decoder(
        query=query,
        key=None,
        value=memory,
        query_pos=query_pos,
        key_padding_mask=mask_flatten,
        reference_points=reference_points,
        spatial_shapes=spatial_shapes,
        level_start_index=level_start_index,
        valid_ratios=valid_ratios,
        reg_branches=reg_branches,
        bbox_coder=bbox_coder,
        **kwargs)
    inter_references_out = inter_references
    if self.as_two_stage:
        return inter_states, init_reference_out, \
            inter_references_out, enc_outputs_class, \
            enc_outputs_coord_unact_angle
    return inter_states, init_reference_out, \
        inter_references_out, None, None
@TRANSFORMER_LAYER_SEQUENCE.register_module()
class RotatedDeformableDetrTransformerDecoder(TransformerLayerSequence):
    """Implements the decoder in Deformable DETR transformer (rotated boxes).
    Args:
        return_intermediate (bool): Whether to return intermediate outputs
            (one per decoder layer) instead of only the last layer's output.
        coder_norm_cfg (dict): Config of last normalization layer. Default:
            `LN`.
    """
    def __init__(self, *args, return_intermediate=False, **kwargs):
        super(RotatedDeformableDetrTransformerDecoder, self).__init__(*args, **kwargs)
        self.return_intermediate = return_intermediate
    def forward(self,
                query,
                *args,
                reference_points=None,
                valid_ratios=None,
                reg_branches=None,
                bbox_coder=None,
                **kwargs):
        """Forward function for `TransformerDecoder`.
        Args:
            query (Tensor): Input query with shape
                `(num_query, bs, embed_dims)`.
            reference_points (Tensor): The reference
                points of offset, shape (bs, num_query, 4) when as_two_stage,
                otherwise shape (bs, num_query, 2).
            valid_ratios (Tensor): The ratios of valid
                points on the feature map, has shape
                (bs, num_levels, 2).
            reg_branches (obj:`nn.ModuleList`): Used for
                refining the regression results. Only
                passed when with_box_refine is True,
                otherwise `None`.
            bbox_coder: box coder, unused here but accepted for
                interface compatibility with callers.
        Returns:
            Tensor: Results with shape [1, num_query, bs, embed_dims] when
                return_intermediate is `False`, otherwise it has shape
                [num_layers, num_query, bs, embed_dims].
        """
        output = query
        intermediate = []
        intermediate_reference_points = []
        # NOTE(review, translated from Korean): with theta the reference
        # points would become 5-dim; the angle must not be scaled by
        # valid_ratios.
        for lid, layer in enumerate(self.layers):
            # Scale the normalized reference points by the per-level valid
            # ratios so they index into the unpadded feature regions.
            if reference_points.shape[-1] == 4:
                reference_points_input = reference_points[:, :, None] * \
                    torch.cat([valid_ratios, valid_ratios], -1)[:, None]
            else:
                assert reference_points.shape[-1] == 2
                reference_points_input = reference_points[:, :, None] * \
                    valid_ratios[:, None]
            output = layer(
                output,
                *args,
                reference_points=reference_points_input,
                **kwargs)
            # (num_query, bs, C) -> (bs, num_query, C) for the reg branch.
            output = output.permute(1, 0, 2)
            if reg_branches is not None:
                # tmp = obb2xyxy(reg_branches[lid](output), version='le90')
                tmp = reg_branches[lid](output)
                # TODO(review, translated from Korean): theta must be handled
                # once reference points carry 5 values.
                if reference_points.shape[-1] == 4:
                    # Iterative refinement in logit space, then re-normalize.
                    new_reference_points = tmp[..., :4] + inverse_sigmoid(
                        reference_points)
                    new_reference_points = new_reference_points.sigmoid()
                else:
                    assert reference_points.shape[-1] == 2
                    new_reference_points = tmp
                    new_reference_points[..., :2] = tmp[
                        ..., :2] + inverse_sigmoid(reference_points)
                    new_reference_points = new_reference_points.sigmoid()
                # Detach so refinement gradients do not flow across layers.
                reference_points = new_reference_points.detach()
            output = output.permute(1, 0, 2)
            if self.return_intermediate:
                intermediate.append(output)
                # intermediate_reference_points.append(reference_points)
                intermediate_reference_points.append(new_reference_points)
        if self.return_intermediate:
            return torch.stack(intermediate), torch.stack(
                intermediate_reference_points)
        return output, reference_points
| parkyongjun1/rotated_deformabledetr | AO2-DETR/mmrotate/models/utils/rotated_transformer.py | rotated_transformer.py | py | 59,187 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "warnings.warn",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.autograd.set_detect_anomaly",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "f... |
6797208641 | # app imports
import random
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from utils.faker_factory import faker
from ..mails import BaseMailView
class InvitationConsultantProjectMailView(BaseMailView):
    """Mail view inviting a consultant (or a team coach) to a project."""
    template_name = 'mails/invitation_consultant_project.html'
    mandatory_mail_args = ['user', 'project_name', 'relation_name', 'is_coach']
    optional_mail_args = ['team_name', ]
    subject = _(settings.BRAND_NAME + ' - %(name)s invite')
    section = 'project'

    def get_subject(self, **kwargs):
        """Coach invites use a fixed subject; others interpolate relation_name."""
        if kwargs.get('is_coach'):
            return _(settings.BRAND_NAME + ' - Team coach invite')
        return self.subject % ({'name': kwargs.get('relation_name')})

    def get_mock_data(self, optional=True):
        """Produce fake kwargs for previewing/testing the mail template."""
        project_name = faker.word()
        team_name = faker.word()
        return {
            'team_name': team_name,
            'project_name': project_name,
            'relation_name': '[relation_name]',
            'user': faker.first_name(),
            'is_coach': random.randint(0, 1),
            'public_url': '/{}'.format(faker.uri_path()),
        }
| tomasgarzon/exo-services | service-exo-mail/mail/mailviews/invitation_consultant_project.py | invitation_consultant_project.py | py | 1,281 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mails.BaseMailView",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.BRAND_NAME",
"line_number": 19,
"usage_type": "attribute"
... |
35512038562 | import json
import shutil
import unittest
from src import SDF3dData, train_sdf
from src.train_utils import *
class SDFTrainTest(unittest.TestCase):
    """Integration smoke tests for the SDF pipeline: data loading, the graph
    loss wrapper, and a short training run.
    NOTE(review): ``os``, ``sys`` and ``torch`` appear to come in via
    ``from src.train_utils import *`` - confirm before tightening imports.
    """
    @staticmethod
    def get_abs_path():
        # Folder containing this test module; the json configs live beside it.
        path = os.path.abspath(__file__)
        parent_dir = os.path.split(path)[0]
        return parent_dir
    def get_dataloaders(self):
        """Build train/test dataloaders from test_data_configs.json."""
        parent_dir = self.get_abs_path()
        data_configs = os.path.join(parent_dir, "configs", "test_data_configs.json")
        with open(data_configs, "rb") as fid:
            configs = json.load(fid)
        configs = configs['SDF3dData']
        data_handler = SDF3dData(**configs)
        train_dl, test_dl = data_handler.mesh_to_dataloader()
        return train_dl, test_dl
    def get_network(self):
        """Instantiate EncodeProcessDecode from test_network_configs.json."""
        abs_path = os.path.abspath(__file__)
        # Three directory levels up (repo root) so graph_networks is importable.
        parent_folder = os.path.split(os.path.split(os.path.split(abs_path)[0])[0])[0]
        sys.path.append(parent_folder)
        from graph_networks.src import EncodeProcessDecode
        parent_dir = self.get_abs_path()
        network_configs = os.path.join(parent_dir, "configs", "test_network_configs.json")
        with open(network_configs, "rb") as fid:
            config = json.load(fid)
        model_params = config['encode_process_decode']
        model = EncodeProcessDecode(**model_params)
        return model
    def test_graph_loss(self):
        """The l1 sdf loss wrapper should run on one batch without raising."""
        loss_func = {'sdf_loss': {'loss_func_aggr': 'l1'}}
        data_parallel = False
        func = get_loss_funcs(loss_func, data_parallel)
        train_dl, _ = self.get_dataloaders()
        data = next(iter(train_dl))
        # Synthetic target: signed distance to a sphere of radius 0.5.
        data.x = torch.norm(data.x, dim=1, keepdim=True) - 0.5
        func(data)
    def test_train(self):
        """A short train_sdf run should create the save directory (removed after)."""
        parent_dir = self.get_abs_path()
        training_configs = os.path.join(parent_dir, "configs", "test_training_configs.json")
        with open(training_configs, "rb") as fid:
            configs = json.load(fid)
        training_params = configs['train']
        model = self.get_network()
        train_dataloader, test_dataloader = self.get_dataloaders()
        train_sdf(model, train_dataloader, test_dataloader, **training_params)
        save_dir = os.path.join(os.getcwd(), training_params['save_folder_name'])
        self.assertTrue(os.path.isdir(save_dir))
        shutil.rmtree(save_dir)
if __name__ == '__main__':
unittest.main() | amaleki2/graph_sdf | test/test_train.py | test_train.py | py | 2,349 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "src.SDF3dData",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_n... |
33215612402 | import os
import torch
import cflearn
import numpy as np
# for reproduction
np.random.seed(142857)
torch.manual_seed(142857)
# preparation
data_config = {"label_name": "Survived"}
file_folder = os.path.dirname(__file__)
train_file = os.path.join(file_folder, "train.csv")
test_file = os.path.join(file_folder, "test.csv")
def write_submissions(name: str, predictions_: np.ndarray) -> None:
    """Write a Kaggle submission csv pairing each test-set PassengerId with
    the corresponding (flattened) prediction, in file order."""
    with open(test_file, "r") as f:
        f.readline()  # skip the csv header row
        id_list = [line.strip().split(",")[0] for line in f]  # first column = PassengerId
    with open(name, "w") as f:
        f.write("PassengerId,Survived\n")
        for test_id, prediction in zip(id_list, predictions_.ravel()):
            f.write(f"{test_id},{prediction}\n")
# wide and deep
# Train the wide-and-deep model, evaluate it on the train set, and write
# its submission file.
m = cflearn.make("wnd", data_config=data_config)
m.fit(train_file)
cflearn.evaluate(train_file, pipelines=m, contains_labels=True)
predictions = m.predict(test_file, contains_labels=False)
write_submissions("submissions.csv", predictions)  # type: ignore
# tree linear
# Same flow with the tree-linear model.
m = cflearn.make("tree_linear", data_config=data_config).fit(train_file)
predictions = m.predict(test_file, contains_labels=False)
write_submissions("submissions_tree_linear.csv", predictions)  # type: ignore
# save & load
# Round-trip the tree-linear model through save/load and predict with the
# *loaded* model to verify serialization.
cflearn.save(m)
loaded = cflearn.load()["tree_linear"][0]
# BUGFIX: the original predicted with `m` again, so the freshly loaded model
# was never exercised and this submission merely duplicated the previous one.
predictions = loaded.predict(test_file, contains_labels=False)
write_submissions("submissions_loaded_tree_linear.csv", predictions)  # type: ignore
| TrendingTechnology/carefree-learn | examples/titanic/titanic.py | titanic.py | py | 1,421 | python | en | code | null | github-code | 36 | [
{
"api_name": "numpy.random.seed",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.manual_seed",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
... |
74840516263 | import glob
import os, shutil
import numpy as np
import xml.etree.ElementTree as ET
from skimage import io, transform
from PIL import Image
import cv2
class BatchPcik():
    '''
    Batch utilities for a fruit-image dataset: validate image channels,
    quarantine/convert non-conforming files (creating the error folder on
    demand), and rename images plus their VOC-style xml annotations.
    NOTE(review): the class name looks like a typo for "BatchPick"; kept to
    avoid breaking callers.
    '''
    def __init__(self):
        # Default working paths; the __main__ tasks below override them.
        self.imgdir_path = "F:/Fruit_dataset/fresh_fish/"
        self.xml_path = "test/"
        self.error_path = "test/error/"
        # Label vocabulary accepted when renaming dataset files.
        self.classes = ["apple","avocado","banana","beefsteak","blueberry","carambola","cherries","chicken","coconut","durian",
                        "fig","fish","grape","hamimelon","hawthorn","kiwano","kiwi","lemon","litchi","longan","loquat","mango",
                        "mangosteen","mulberry","muskmelon","orange","pawpaw","peach","pear","pemelo","pepinomelon","persimmon",
                        "pineapple","pitaya","pomegranate","rambutan","strawberry","watermelon","waxberry","mix"]
    def read_image(self):
        '''
        Load every 3-channel jpg under each class sub-folder of imgdir_path,
        resized to 100x100; the sub-folder index doubles as the label.
        Returns (images, labels) as float32 / int32 numpy arrays.
        '''
        w = 100
        h = 100
        c = 3
        path = self.imgdir_path
        cate = [path + x for x in os.listdir(path) if os.path.isdir(path+x)]
        images = []
        labels = []
        for index, folder in enumerate(cate):
            for im in glob.glob(folder + '/*.jpg'):
                img = io.imread(im)
                try:
                    if img.shape[2] == c:
                        img = transform.resize(img, (w, h))
                        images.append(img)
                        labels.append(index)
                        print(im)
                    else:
                        print(im, ' IS WRONG')
                except:
                    # Grayscale images have no third shape dim; skip them.
                    continue
            print('label %d is:' % index, folder)
        return np.asarray(images, np.float32), np.asarray(labels, np.int32)
    def find_wrong_pic(self, save_change="c"):
        '''
        Deal with images that are not 3-channel RGB (translated).
        save_change: `"c"` (change) converts images to 3-channel RGB in place
                     `"r"` (remove) deletes the files
                     `"m"` (move) relocates the files to self.error_path
        NOTE(review): as written every file in imgdir_path is processed, not
        only the non-RGB ones.
        '''
        filelist = os.listdir(self.imgdir_path)
        if save_change == "c":
            for filename in filelist:
                image_file = Image.open(self.imgdir_path + filename)
                image_file.convert('RGB').save(self.imgdir_path + filename)
        elif save_change == "r":
            for filename in filelist:
                os.remove(self.imgdir_path + filename)
        elif save_change == "m":
            # Create the quarantine folder on first use.
            is_exists = os.path.exists(self.error_path)
            os.makedirs(self.error_path) if not is_exists else print("目录已存在")
            for filename in filelist:
                shutil.move(self.imgdir_path + filename, self.error_path)
        else:
            print("the config \"save_change\" must choose in 'c' or 'r' or 'm'")
        return True
    def rename (self, pic_name, batch, i_num):
        '''
        TODO(translated): rework. Goal: batch-rename images across sibling
        folders (which must share a naming scheme), or within a single class
        folder; both modes should eventually be selectable here.
        '''
        filelist = os.listdir(self.imgdir_path) # list directory entries
        total_num = len(filelist) # number of files found
        i = i_num # numbering starts from i_num
        for item in filelist:
            if item.endswith('.jpg'): # source images are expected as .jpg
                src = os.path.join(os.path.abspath(self.imgdir_path), item)
                dst = os.path.join(os.path.abspath(self.imgdir_path), pic_name + '_' + batch + '_' + format(str(i), '0>4s') + '.jpg') # zero-padded names, e.g. apple_05_0001.jpg
                try:
                    os.rename(src, dst)
                    print ('converting %s to %s ...' % (src, dst))
                    i = i + 1
                except:
                    continue
        print ('total %d to rename & converted %d jpgs' % (total_num, i))
    def rename_batch (self, batch, suffix='.jpg', i_num=1):
        '''
        Replace the batch segment of dataset file names, e.g.:
        apple_01_0001.jpg -> apple_04_0001.jpg
        apple_01_0001.xml -> apple_04_0001.xml
        batch: new batch identifier
        suffix: file extension to match
        i_num: running counter (only used for the final report)
        '''
        filelist = os.listdir(self.imgdir_path) # list directory entries
        for filename in filelist:
            pic_1, _, pic_3 = filename.split('_') # class / old batch / serial+ext
            if (pic_1 in self.classes) and filename.endswith(suffix):
                src = os.path.join(os.path.abspath(self.imgdir_path), filename)
                dst = os.path.join(os.path.abspath(self.imgdir_path), pic_1 + '_' + batch + '_' + pic_3)
                try:
                    os.rename(src, dst)
                    print ('converting %s to %s ...' % (src, dst))
                    i_num = i_num + 1
                except:
                    continue
        print ('total %d to rename & converted %d jpgs' % (len(filelist), i_num-1))
    def rename_dataset(self, batch, suffix='.jpg', xml_suffix='.xml'):
        '''
        Update the batch segment everywhere: image name, xml annotation name,
        and the image name/path recorded inside the xml, e.g.:
        apple_01_0001.jpg -> apple_04_0001.jpg
        apple_01_0001.xml -> apple_04_0001.xml
        batch: `str`, the new batch identifier
        suffix: image extension, default `.jpg`
        xml_suffix: annotation extension, default `.xml`
        '''
        filelist = os.listdir(self.imgdir_path) # list directory entries
        for filename in filelist:
            # e.g. filename = "apple_01_0001.jpg" before the change
            pic_1, _, pic_3 = filename.split('_') # apple / 01 / 0001.jpg
            seach_name = filename.split('.')[0] # stem used to find the xml, e.g. apple_01_0001
            new_pic_name = pic_1 + '_' + batch + '_' + pic_3 # e.g. apple_04_0001.jpg
            xml_name = seach_name + xml_suffix # annotation before the change, e.g. apple_01_0001.xml
            xml_1, _, xml_3 = xml_name.split('_') # apple / 01 / 0001.xml
            new_xml_name = xml_1 + '_' + batch + '_' + xml_3 # reassembled with the new batch id
            if (pic_1 in self.classes) and filename.endswith(suffix):
                src = os.path.join(os.path.abspath(self.imgdir_path), filename)
                dst = os.path.join(os.path.abspath(self.imgdir_path), new_pic_name)
                src_xml = os.path.join(os.path.abspath(self.xml_path), xml_name)
                dst_xml = os.path.join(os.path.abspath(self.xml_path), new_xml_name)
                # Patch the annotation's recorded file name and path first.
                doc = ET.parse(self.xml_path + xml_name)
                root = doc.getroot()
                root.find("filename").text = new_pic_name
                root.find("path").text = self.imgdir_path + new_pic_name
                doc.write(self.xml_path + xml_name)
                try:
                    os.rename(src, dst)
                    os.rename(src_xml, dst_xml)
                    print("---filename:%s has been modified---"%(filename))
                except:
                    continue
    def change_xml_all(self, suffix='.jpg'):
        '''
        Rewrite <filename> and <path> inside every xml so they match the xml
        file's own name (stem + image suffix); used when xml names no longer
        agree with their recorded contents.
        suffix: image extension to append, default `.jpg`
        '''
        filelist = os.listdir(self.xml_path)
        for xmlfile in filelist:
            doc = ET.parse(self.xml_path + xmlfile)
            root = doc.getroot()
            alter1 = root.find('filename')
            alter1.text = xmlfile.split('.')[0] + suffix
            alter2 = root.find('path')
            # Keep the directory part, swap in the corrected file name.
            alter2.text = alter2.text.rsplit('\\', 1)[0] + '\\' + xmlfile.split('.')[0] + suffix
            doc.write(self.xml_path + xmlfile)
        print("---done---")
    def get_train_name(self, write_path, suffix='.jpg'):
        '''
        Write the sorted stems of all dataset images, one per line, to a
        text file (VOC ImageSets style).
        write_path: destination path of the text file
        '''
        filelist = os.listdir(self.imgdir_path)
        filelist.sort() # sorts in place
        f = open(write_path, 'w')
        for filename in filelist:
            if filename.endswith(suffix):
                write_name = filename.split('.')[0] + '\n'
                f.write(write_name)
        f.close()
if __name__ == "__main__":
demo = BatchPcik()
#demo.error_path = "F:/Fruit_dataset/pick_img/error_img/"
key = 2
if key == 1 :
# 测试修改批次号方法
demo.imgdir_path = "E:/fruit_server/VOCdevkit/VOC2007/Annotations/"
demo.classes = ["apple"]
batch = "04"
demo.rename_batch(batch, suffix='.xml')
elif key == 2:
#demo.classes = ["apple", "avocado", "broccoli", "carrot", "chinese-cabbage", "coconut",
# "corn", "hami-melon", "lemon", "mix" ,"onion", "orange", "pear",
# "pomegranate", "pomelo", "sweet-potato"]
demo.classes = ["hami-melon"]
for class_name in demo.classes:
demo.imgdir_path = 'E:/fruit_server/15/%s'%(class_name)
demo.rename(class_name, batch='05', i_num=1)
elif key == 3:
demo.xml_path = "E:/fruit_server/VOCdevkit/VOC2007/Annotations/"
demo.change_xml_all()
elif key == 4:
# 同时修改图片名,标注名和标注信息内的图片名、图片地址
demo.imgdir_path = "E:/fruit_server/VOCdevkit/VOC2007/JPEGImages/"
demo.xml_path = "E:/fruit_server/VOCdevkit/VOC2007/Annotations/"
demo.classes = ["apple","kiwi","mango","mangosteen","mix","orange","pear","peach","pomegranate"]
demo.rename_dataset("04")
elif key == 5:
demo.imgdir_path = "VOC2007/JPEGImages/"
write_path = "VOC2007/ImageSets/Main/train.txt"
demo.get_train_name(write_path)
elif key == 6:
demo.imgdir_path = "test/error_img/"
demo.find_wrong_pic(save_change='m')
elif key == 10:
classes.append('mix')
for class_name in classes:
dirpath = "F:/Fruit_dataset/yolo_39class/test_image/%s"%(class_name)
os.makedirs(dirpath)
print("New folder has been done!-->" + dirpath)
else:
# 测试集制作
classes.append('mix')
for class_name in classes:
batch = 'test'
i_num = 1
demo.imgdir_path = "F:/Fruit_dataset/yolo_39class/test_image/%s/"%(class_name)
demo.find_wrong_pic()
demo.rename(class_name, batch, i_num)
'''
else:
demo.imgdir_path = "F:/Fruit_dataset/meat_train/test/"
data, label = demo.read_image()
print(data.shape)
''' | CGump/dataset-tools | pick_img.py | pick_img.py | py | 11,355 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": ... |
14381362201 | # 1099
from typing import List
def twoSumLessThanK(nums: List[int], k: int) -> int:
    """Return the largest nums[i] + nums[j] (i != j) strictly below k, or -1.
    Two-pointer sweep over the sorted values: O(n log n) time, O(n) space.
    """
    ordered = sorted(nums)
    best = -1
    lo, hi = 0, len(ordered) - 1
    while lo < hi:
        pair_sum = ordered[lo] + ordered[hi]
        if pair_sum < k:
            # Candidate pair under the limit; try a larger left element next.
            if pair_sum > best:
                best = pair_sum
            lo += 1
        else:
            # Sum too big: shrink from the right.
            hi -= 1
    return best
print(twoSumLessThanK([34,23,1,24,75,33,54,8],60))
print(twoSumLessThanK([10, 20, 30], 15))
| jithindmathew/LeetCode | two-sum-less-than-k.py | two-sum-less-than-k.py | py | 455 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
13998551053 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from os import path, listdir, curdir, remove
import uproot as ur
from astropy.io import fits
from km3net_testdata import data_path
from km3irf import build_irf
class TestBuild_IRF(unittest.TestCase):
    """Smoke tests for km3irf.build_irf: each builder must emit a non-empty
    FITS file whose first extension carries the expected EXTNAME.
    NOTE(review): several test method names spell "buid" for "build"; kept
    as-is since test discovery matches the existing names.
    """
    def setUp(self):
        # Ten-event KM3NeT test file shipped with km3net_testdata.
        self.testdata = data_path("dst/mcv5.1.km3_numuCC.ALL.dst.bdt.10events.root")
        self.init_data = build_irf.DataContainer(no_bdt=False, infile=self.testdata)
    def test_apply_cuts(self):
        """Applying cuts should leave a usable dataframe."""
        self.init_data.apply_cuts()
        # NOTE(review): `shape[0] != None` is always True for an int, so this
        # assert can never fail; a row-count check was probably intended.
        assert self.init_data.df.shape[0] != None
    def test_unpack_data(self):
        """Unpacked frame has 110 elements with bdt columns, 90 without."""
        df_test = build_irf.unpack_data(
            no_bdt=False, uproot_file=ur.open(self.testdata)
        )
        assert (df_test.size == 110) | (df_test.size == 90)
    def test_buid_aeff(self):
        """build_aeff writes a non-empty 'EFFECTIVE AREA' FITS file."""
        self.init_data.build_aeff()
        size_of = path.getsize(path.join(path.abspath(curdir), "aeff.fits"))
        with fits.open(path.join(path.abspath(curdir), "aeff.fits")) as file_fits:
            # NOTE(review): a module-level global is unnecessary here; a
            # local variable would suffice.
            global header_fits
            header_fits = file_fits[1].header["EXTNAME"]
        assert "aeff.fits" in listdir(path.abspath(curdir))
        assert size_of != 0
        assert header_fits == "EFFECTIVE AREA"
        # Clean up the generated artifact.
        remove(path.join(path.abspath(curdir), "aeff.fits"))
    def test_buid_psf(self):
        """build_psf writes a non-empty 'PSF_2D_TABLE' FITS file."""
        self.init_data.build_psf()
        size_of = path.getsize(path.join(path.abspath(curdir), "psf.fits"))
        with fits.open(path.join(path.abspath(curdir), "psf.fits")) as file_fits:
            global header_fits
            header_fits = file_fits[1].header["EXTNAME"]
        assert "psf.fits" in listdir(path.abspath(curdir))
        assert size_of != 0
        assert header_fits == "PSF_2D_TABLE"
        remove(path.join(path.abspath(curdir), "psf.fits"))
    def test_buid_edisp(self):
        """build_edisp writes a non-empty 'EDISP_2D' FITS file."""
        self.init_data.build_edisp()
        self.file_name = "edisp.fits"
        size_of = path.getsize(path.join(path.abspath(curdir), self.file_name))
        with fits.open(path.join(path.abspath(curdir), self.file_name)) as file_fits:
            global header_fits
            header_fits = file_fits[1].header["EXTNAME"]
        assert self.file_name in listdir(path.abspath(curdir))
        assert size_of != 0
        assert header_fits == "EDISP_2D"
        remove(path.join(path.abspath(curdir), self.file_name))
if __name__ == "__main__":
unittest.main()
| KM3NeT/km3irf | tests/test_main.py | test_main.py | py | 2,400 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "km3net_testdata.data_path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "km3irf.build_irf.DataContainer",
"line_number": 15,
"usage_type": "call"
},
{
"a... |
24369652375 | from django.shortcuts import render, HttpResponseRedirect
from django.contrib.auth import login, authenticate
from .forms import SignUpForm, LoginForm, PostForm
from django.contrib.auth import authenticate, login, logout
from .models import Post
# Create your views here.
def Home(request):
    """Render the landing page."""
    return render(request, 'home.html')
def SignUp(request):
    """Registration view: anonymous users get/submit the sign-up form;
    authenticated users are redirected to the home page."""
    if not request.user.is_authenticated:
        if request.method == 'POST':
            form = SignUpForm(request.POST)
            if form.is_valid():
                form.save()
                return HttpResponseRedirect('/login')
            else:
                # NOTE(review): redirecting on an invalid form discards the
                # validation errors; re-rendering the bound form is the usual
                # Django pattern - confirm before changing behaviour.
                return HttpResponseRedirect('/signup')
        else:
            form = SignUpForm()
            return render(request, 'signup.html', {'form' : form})
    else:
        return HttpResponseRedirect('/')
def Login(request):
    """Log-in view: authenticate the submitted credentials and start a
    session; already-authenticated users are redirected home."""
    if not request.user.is_authenticated:
        if request.method == 'POST':
            form = LoginForm(request = request, data = request.POST)
            if form.is_valid():
                username = form.cleaned_data['username']
                password = form.cleaned_data['password']
                # Re-authenticate explicitly to obtain the user object.
                user = authenticate(username = username, password = password)
                if user is not None:
                    login(request, user)
                    return HttpResponseRedirect('/')
                else:
                    return HttpResponseRedirect('/login')
            else:
                return HttpResponseRedirect('/login')
        else:
            form = LoginForm()
            return render(request, 'login.html', {'form' : form})
    else:
        return HttpResponseRedirect('/')
def Logout(request):
    """End the current session and return to the home page."""
    logout(request)
    return HttpResponseRedirect('/')
def Post_art(request):
    """Create a post: logged-in users submit PostForm and the post is saved
    with the current user attached; anonymous users go to the login page."""
    if request.user.is_authenticated:
        if request.method == 'POST':
            form = PostForm(request.POST)
            if form.is_valid():
                # Attach the author before persisting.
                instance = form.save(commit=False)
                instance.user_name = request.user
                instance.save()
                # text = form.cleaned_data['text']
                # post = Post(text = text)
                # post.save()
                # form = PostForm()
                return HttpResponseRedirect('/')
            else:
                return HttpResponseRedirect('/post')
        else:
            form = PostForm()
            return render(request, 'post.html', {'form' : form})
    else:
        return HttpResponseRedirect('/login')
def Posts_All(request):
    """Render all posts (default manager ordering)."""
    post_s = Post.objects.all()
    return render(request, 'posts.html', {'post_s' : post_s})
| SurajLodh/TaskProduct | User/views.py | views.py | py | 2,617 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "forms.SignUpForm",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.HttpResponseRedirect",
"line_number": 19,
"usage_type": "call"
},
{
"ap... |
2014305094 | import speech_recognition as sr
import pyttsx3
def SpeakText(command):
    """Speak *command* aloud via the system text-to-speech engine (blocking)."""
    engine = pyttsx3.init()
    engine.say(command)
    engine.runAndWait()
def CollectText(x, gram):
    """Listen on the default microphone and return the recognized phrase,
    lower-cased, constrained by the grammar file *gram*.
    NOTE(review): the ``x`` parameter is immediately overwritten and never
    read; it is kept only to preserve the call signature.
    """
    recognizer = sr.Recognizer()
    microphone = sr.Microphone()
    with microphone as source:
        # Brief calibration for background noise before listening.
        recognizer.adjust_for_ambient_noise(source, duration=0.3)
        audio = recognizer.listen(source)
    # Offline recognition via CMU PocketSphinx with a custom grammar.
    x = recognizer.recognize_sphinx(audio, grammar=gram)
    x = x.lower()
    return x
| Dorito-Dog/Python_Ktane_Bot | solvers/solverSpeech.py | solverSpeech.py | py | 488 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyttsx3.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Recognizer",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Microphone",
"line_number": 11,
"usage_type": "call"
}
] |
43844905296 | import argparse
from dataclasses import dataclass
from decimal import *
import re
import sys
from typing import Dict, List
# Regexes for the leading integer / decimal tokens of Excellon fields.
# FIX: use raw strings - the original '\.' in a plain literal relies on
# Python passing unknown escapes through and triggers a DeprecationWarning.
int_re = re.compile(r'[-+]?[0-9]+')
float_re = re.compile(r'[-+]?[0-9]+(\.[0-9]+)?')
def extract_int(l: str) -> (int, str):
    """Split a leading signed integer off *l*; return (value, remainder)."""
    match = int_re.match(l)
    assert match
    end = match.end()
    return int(l[:end]), l[end:]
def extract_dec(l: str) -> (Decimal, str):
    """Split a leading signed decimal number off *l*; return (value, remainder)."""
    match = float_re.match(l)
    assert match
    end = match.end()
    return Decimal(l[:end]), l[end:]
@dataclass
class Tool:
    # One Excellon tool definition (a 'TnnC<dia>' line).
    num: int      # tool number (1-99)
    dia: Decimal  # drill diameter in file units
@dataclass
class Coordinate:
    # A drill-hole position, already scaled down by the digit count.
    x: Decimal
    y: Decimal
@dataclass
class Excellon:
    # Parsed Excellon drill file: tool definitions plus the holes drilled
    # with each tool.
    # NOTE(review): the hand-written __init__ below takes precedence over the
    # dataclass-generated one; @dataclass still supplies __repr__/__eq__
    # based on these two fields.
    tools: Dict[int, Tool]
    holes: Dict[int, List[Coordinate]]
    def comment(self, l):
        # '%' lines are comments/format markers; ignored.
        pass
    def cmd_m30(self, l):
        # M30: end of program - only valid after the M48 header started.
        assert self.started
        self.started = False
    def cmd_m48(self, l):
        # M48: beginning of header; marks the file as "started".
        self.started = True
    def cmd_m71(self, l):
        # M71: metric units.
        assert self.started
        self.inch = False
    def cmd_m72(self, l):
        # M72: inch units.
        assert self.started
        self.inch = True
    # Dispatch on the numeric argument of an 'M' command.
    dispatch_m = {
        30: cmd_m30,
        48: cmd_m48,
        71: cmd_m71,
        72: cmd_m72
    }
    def cmd_m(self, l):
        """Parse 'M<nn>' and dispatch to the matching handler."""
        cmd_num, l = extract_int(l)
        assert not l
        self.dispatch_m[cmd_num](self, l)
    def cmd_t(self, l):
        """Handle 'T<nn>[C<dia>]': define a new tool or select an existing one."""
        assert self.started
        tool_num, l = extract_int(l)
        assert 1 <= tool_num <= 99
        if not l:
            # Bare 'Tnn' selects a previously defined tool.
            assert tool_num in self.tools
            self.current_tool_num = tool_num
            return
        # 'TnnC<dia>' defines a new tool with the given diameter.
        assert l[0] == 'C'
        l = l[1:]
        tool_dia, l = extract_dec(l)
        assert not l
        assert tool_num not in self.tools
        self.tools[tool_num] = Tool(tool_num, tool_dia)
        self.holes[tool_num] = []
    def cmd_x(self, l):
        """Handle 'X...Y...': record a hole for the currently selected tool."""
        assert self.started
        x, l = extract_dec(l)
        assert l[0] == 'Y'
        y, l = extract_dec(l[1:])
        assert not l
        # Coordinates are stored scaled down by the configured digit count.
        coord = Coordinate(x / self.digits_scaling, y / self.digits_scaling)
        self.holes[self.current_tool_num].append(coord)
        #print(f'tool {self.current_tool_num} hole {coord}')
    # Top-level dispatch keyed on the first character of each line.
    dispatch = { '%': comment,
                 'M': cmd_m,
                 'T': cmd_t,
                 'X': cmd_x
                 }
    def parse(self, file):
        """Read the drill file line by line and dispatch each command."""
        self.started = False
        while True:
            l = file.readline()
            if not l:
                break
            l = l.strip()
            self.dispatch[l[0]](self, l[1:])
    def __init__(self, digits = 5, file = None):
        self.digits = digits
        # Scale factor converting raw integer coordinates to real units.
        self.digits_scaling = Decimal(10) ** self.digits
        self.tools = { }
        self.holes = { }
        if file is not None:
            self.parse(file)
def main():
    """CLI entry point: parse an Excellon drill file and report every hole
    that does not sit on the given grid. Holes whose tool diameter equals
    --exclude-diameter (typically vias) are skipped.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('excellon',
                        nargs = '?',
                        type = argparse.FileType('r'),
                        default = sys.stdin)
    # currently only the 'suppress leading zeros' number format is supported
    parser.add_argument('-d', '--digits',
                        type = int,
                        default = 5,
                        help = 'number of decimal places in coordinates')
    parser.add_argument('-g', '--grid',
                        type = str,
                        default = '0.1',
                        help = 'grid size')
    parser.add_argument('-e', '--exclude-diameter',
                        type = str,
                        default = '0.012',
                        help = 'hole diameter excluded from checking, typically used for vias')
    args = parser.parse_args()
    excellon = Excellon(digits = args.digits, file = args.excellon)
    # Grid and exclusion diameter stay as Decimal for exact arithmetic.
    exclude_dia = Decimal(args.exclude_diameter)
    grid = Decimal(args.grid)
    for tool_num, tool in excellon.tools.items():
        # skip vias
        if tool.dia == exclude_dia:
            continue
        for coord in excellon.holes[tool_num]:
            x = coord.x
            y = coord.y
            # Decimal modulo gives an exact on-grid test (no float error).
            if x % grid != 0 or y % grid != 0:
                print(f'dia {tool.dia}: ({x}, {y})')
if __name__ == '__main__':
main()
| brouhaha/gridcheck | gridcheck.py | gridcheck.py | py | 4,133 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
... |
20515420312 | import os
import PyPDF2
import openai
from flask import Flask, redirect, render_template, request, url_for
app = Flask(__name__)
# Set your API key directly
# SECURITY(review): a live-looking OpenAI secret key is committed in source.
# Treat it as compromised: revoke it and load the key from the environment
# or a secrets manager instead of hard-coding it here.
os.environ["OPENAI_API_KEY"] = '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'
openai.api_key = os.getenv("OPENAI_API_KEY")
@app.route("/", methods=["GET", "POST"])
def index():
if request.method == "POST":
# Leer el contenido del PDF cargado
pdf_file = request.files["pdf_file"]
title = request.form["title"]
pdf_content = ""
if pdf_file:
pdf_reader = PyPDF2.PdfReader(pdf_file)
for page_num in range(len(pdf_reader.pages)):
page = pdf_reader.pages[page_num]
pdf_content += page.extract_text()
# Enviar el contenido del PDF a GPT-4 para resumirlo
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "system", "content": "un tutor de universidad 'Dpto. de Ciencias de la Computación e Inteligencia Artificial' ."},
{"role": "user", "content": generate_prompt(pdf_content, title)},
]
)
# Obtener la respuesta completa de OpenAI
result = response.choices[0].message["content"]
return render_template("index.html", result=result)
return render_template("index.html")
def generate_prompt(pdf_content, title):
    """Build the Spanish summarization prompt handed to the chat model."""
    prompt = f"""Resume el trabajo académico sobre {title}.
Deja claro todo lo importante. Quiero un resumen que mantiene el 50% del trabajo:\n\n{pdf_content}"""
    return prompt
if __name__ == "__main__":
app.run(debug=True)
| OUABSL/Congreso | app.py | app.py | py | 1,642 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "openai.api_key",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_n... |
8445360878 | import operator
import warnings
import numpy
try:
import scipy.sparse
_scipy_available = True
except ImportError:
_scipy_available = False
import cupy
from cupy._core import _accelerator
from cupy.cuda import cub
from cupy.cuda import runtime
from cupyx.scipy.sparse import _base
from cupyx.scipy.sparse import _compressed
from cupyx.scipy.sparse import _csc
from cupyx.scipy.sparse import SparseEfficiencyWarning
from cupyx.scipy.sparse import _util
class csr_matrix(_compressed._compressed_sparse_matrix):
"""Compressed Sparse Row matrix.
This can be instantiated in several ways.
``csr_matrix(D)``
``D`` is a rank-2 :class:`cupy.ndarray`.
``csr_matrix(S)``
``S`` is another sparse matrix. It is equivalent to ``S.tocsr()``.
``csr_matrix((M, N), [dtype])``
It constructs an empty matrix whose shape is ``(M, N)``. Default dtype
is float64.
``csr_matrix((data, (row, col)))``
All ``data``, ``row`` and ``col`` are one-dimenaional
:class:`cupy.ndarray`.
``csr_matrix((data, indices, indptr))``
All ``data``, ``indices`` and ``indptr`` are one-dimenaional
:class:`cupy.ndarray`.
Args:
arg1: Arguments for the initializer.
shape (tuple): Shape of a matrix. Its length must be two.
dtype: Data type. It must be an argument of :class:`numpy.dtype`.
copy (bool): If ``True``, copies of given arrays are always used.
.. seealso::
:class:`scipy.sparse.csr_matrix`
"""
format = 'csr'
def get(self, stream=None):
"""Returns a copy of the array on host memory.
Args:
stream (cupy.cuda.Stream): CUDA stream object. If it is given, the
copy runs asynchronously. Otherwise, the copy is synchronous.
Returns:
scipy.sparse.csr_matrix: Copy of the array on host memory.
"""
if not _scipy_available:
raise RuntimeError('scipy is not available')
data = self.data.get(stream)
indices = self.indices.get(stream)
indptr = self.indptr.get(stream)
return scipy.sparse.csr_matrix(
(data, indices, indptr), shape=self._shape)
def _convert_dense(self, x):
m = dense2csr(x)
return m.data, m.indices, m.indptr
def _swap(self, x, y):
return (x, y)
    def _add_sparse(self, other, alpha, beta):
        """Return ``alpha * self + beta * other`` (``other`` converted to CSR).
        Duplicate entries are merged first; uses cuSPARSE csrgeam2 when the
        installed build provides it, falling back to csrgeam.
        """
        from cupyx import cusparse
        self.sum_duplicates()
        other = other.tocsr()
        other.sum_duplicates()
        if cusparse.check_availability('csrgeam2'):
            csrgeam = cusparse.csrgeam2
        elif cusparse.check_availability('csrgeam'):
            csrgeam = cusparse.csrgeam
        else:
            raise NotImplementedError
        return csrgeam(self, other, alpha, beta)
    def _comparison(self, other, op, op_name):
        """Shared backend for the rich comparison operators.
        Args:
            other: scalar, dense array, or sparse matrix to compare against.
            op: the Python operator function (used only for the dense path).
            op_name (str): internal kernel name ('_eq_', '_ne_', '_lt_', ...).
        Returns:
            Elementwise comparison result (csr_matrix, or dense for dense
            ``other``).
        """
        if _util.isscalarlike(other):
            data = cupy.asarray(other, dtype=self.dtype).reshape(1)
            if numpy.isnan(data[0]):
                # NaN compares unequal to everything: '!=' is all-True,
                # every other comparison is all-False (empty sparse result).
                if op_name == '_ne_':
                    return csr_matrix(cupy.ones(self.shape, dtype=numpy.bool_))
                else:
                    return csr_matrix(self.shape, dtype=numpy.bool_)
            # Wrap the scalar as a 1x1 CSR matrix so the binopt kernel can
            # broadcast it against self.
            indices = cupy.zeros((1,), dtype=numpy.int32)
            indptr = cupy.arange(2, dtype=numpy.int32)
            other = csr_matrix((data, indices, indptr), shape=(1, 1))
            return binopt_csr(self, other, op_name)
        elif _util.isdense(other):
            return op(self.todense(), other)
        elif isspmatrix_csr(other):
            self.sum_duplicates()
            other.sum_duplicates()
            if op_name in ('_ne_', '_lt_', '_gt_'):
                return binopt_csr(self, other, op_name)
            # ==, <=, >= would yield mostly-True (dense) results; compute the
            # sparse opposite and negate it densely, warning the caller.
            warnings.warn(
                "Comparing sparse matrices using ==, <=, and >= is "
                "inefficient, try using !=, <, or > instead.",
                SparseEfficiencyWarning)
            if op_name == '_eq_':
                opposite_op_name = '_ne_'
            elif op_name == '_le_':
                opposite_op_name = '_gt_'
            elif op_name == '_ge_':
                opposite_op_name = '_lt_'
            res = binopt_csr(self, other, opposite_op_name)
            out = cupy.logical_not(res.toarray())
            return csr_matrix(out)
        raise NotImplementedError
# Rich comparisons all delegate to ``_comparison`` with the matching
# ``operator`` function and the internal op tag used by ``binopt_csr``.
def __eq__(self, other):
    return self._comparison(other, operator.eq, '_eq_')

def __ne__(self, other):
    return self._comparison(other, operator.ne, '_ne_')

def __lt__(self, other):
    return self._comparison(other, operator.lt, '_lt_')

def __gt__(self, other):
    return self._comparison(other, operator.gt, '_gt_')

def __le__(self, other):
    return self._comparison(other, operator.le, '_le_')

def __ge__(self, other):
    return self._comparison(other, operator.ge, '_ge_')
def __mul__(self, other):
    """Multiplication: scalar scaling, matrix-matrix or matrix-vector.

    Dispatches on the type of ``other``:

    * scalar: element-wise scaling of the stored data,
    * CSR/CSC/other sparse: sparse matrix-matrix product via cuSPARSE,
    * dense 1-D: sparse matrix-vector product (CUB or cuSPARSE),
    * dense 2-D: sparse matrix-dense matrix product.

    Returns:
        csr_matrix or cupy.ndarray, or ``NotImplemented`` for
        unsupported operand types.
    """
    from cupyx import cusparse
    if cupy.isscalar(other):
        self.sum_duplicates()
        return self._with_data(self.data * other)
    elif isspmatrix_csr(other):
        self.sum_duplicates()
        other.sum_duplicates()
        # Prefer the generic SpGEMM API, then the legacy ones.
        if cusparse.check_availability('spgemm'):
            return cusparse.spgemm(self, other)
        elif cusparse.check_availability('csrgemm2'):
            return cusparse.csrgemm2(self, other)
        elif cusparse.check_availability('csrgemm'):
            return cusparse.csrgemm(self, other)
        else:
            raise AssertionError
    elif _csc.isspmatrix_csc(other):
        self.sum_duplicates()
        other.sum_duplicates()
        if cusparse.check_availability('csrgemm') and not runtime.is_hip:
            # trans=True is still buggy as of ROCm 4.2.0
            return cusparse.csrgemm(self, other.T, transb=True)
        elif cusparse.check_availability('spgemm'):
            b = other.tocsr()
            b.sum_duplicates()
            return cusparse.spgemm(self, b)
        elif cusparse.check_availability('csrgemm2'):
            b = other.tocsr()
            b.sum_duplicates()
            return cusparse.csrgemm2(self, b)
        else:
            raise AssertionError
    elif _base.isspmatrix(other):
        # Any other sparse format: convert to CSR and retry.
        return self * other.tocsr()
    elif _base.isdense(other):
        if other.ndim == 0:
            self.sum_duplicates()
            return self._with_data(self.data * other)
        elif other.ndim == 1:
            self.sum_duplicates()
            other = cupy.asfortranarray(other)
            # need extra padding to ensure not stepping on the CUB bug,
            # see cupy/cupy#3679 for discussion
            is_cub_safe = (self.indptr.data.mem.size
                           > self.indptr.size * self.indptr.dtype.itemsize)
            # CUB spmv is buggy since CUDA 11.0, see
            # https://github.com/cupy/cupy/issues/3822#issuecomment-782607637
            is_cub_safe &= (cub._get_cuda_build_version() < 11000)
            for accelerator in _accelerator.get_routine_accelerators():
                if (accelerator == _accelerator.ACCELERATOR_CUB
                        and not runtime.is_hip
                        and is_cub_safe and other.flags.c_contiguous):
                    return cub.device_csrmv(
                        self.shape[0], self.shape[1], self.nnz,
                        self.data, self.indptr, self.indices, other)
            if (cusparse.check_availability('csrmvEx') and self.nnz > 0 and
                    cusparse.csrmvExIsAligned(self, other)):
                # csrmvEx does not work if nnz == 0
                csrmv = cusparse.csrmvEx
            elif cusparse.check_availability('csrmv'):
                csrmv = cusparse.csrmv
            elif cusparse.check_availability('spmv'):
                csrmv = cusparse.spmv
            else:
                raise AssertionError
            return csrmv(self, other)
        elif other.ndim == 2:
            self.sum_duplicates()
            if cusparse.check_availability('csrmm2'):
                csrmm = cusparse.csrmm2
            elif cusparse.check_availability('spmm'):
                csrmm = cusparse.spmm
            else:
                raise AssertionError
            return csrmm(self, cupy.asfortranarray(other))
        else:
            raise ValueError('could not interpret dimensions')
    else:
        return NotImplemented
# Python 2 style division is intentionally unsupported; use ``/``
# (``__truediv__``) instead.
def __div__(self, other):
    raise NotImplementedError

def __rdiv__(self, other):
    raise NotImplementedError
def __truediv__(self, other):
    """Point-wise division by another matrix, vector or scalar.

    Args:
        other: scalar, dense array/vector, or sparse matrix divisor.

    Returns:
        csr_matrix, coo_matrix, dense matrix, or ``NotImplemented``.
    """
    if _util.isscalarlike(other):
        dtype = self.dtype
        if dtype == numpy.float32:
            # Note: This is a work-around to make the output dtype the same
            # as SciPy. It might be SciPy version dependent.
            dtype = numpy.float64
        dtype = cupy.result_type(dtype, other)
        # Divide by multiplying with the reciprocal of the scalar.
        d = cupy.reciprocal(other, dtype=dtype)
        return multiply_by_scalar(self, d)
    elif _util.isdense(other):
        other = cupy.atleast_2d(other)
        other = cupy.broadcast_to(other, self.shape)
        check_shape_for_pointwise_op(self.shape, other.shape)
        # Work in COO form so each stored entry knows its (row, col).
        ret = self.tocoo()
        ret.data = _cupy_divide_by_dense()(
            ret.data, ret.row, ret.col, ret.shape[1], other)
        return ret
    elif _base.isspmatrix(other):
        # Note: If broadcasting is needed, an exception is raised here for
        # compatibility with SciPy, as SciPy does not support broadcasting
        # in the "sparse / sparse" case.
        check_shape_for_pointwise_op(self.shape, other.shape,
                                     allow_broadcasting=False)
        dtype = numpy.promote_types(self.dtype, other.dtype)
        if dtype.char not in 'FD':
            dtype = numpy.promote_types(numpy.float64, dtype)
        # Note: The following implementation converts two sparse matrices
        # into dense matrices and then performs a point-wise division,
        # which can use lots of memory.
        self_dense = self.todense().astype(dtype, copy=False)
        return self_dense / other.todense()
    return NotImplemented
def __rtruediv__(self, other):
    # Right division (scalar / sparse) is not supported.
    return NotImplemented
# TODO(unno): Implement check_format
def diagonal(self, k=0):
    """Returns the k-th diagonal of the matrix.

    Args:
        k (int): diagonal offset (0 = main, > 0 above, < 0 below).

    Returns:
        cupy.ndarray: 1-D array of diagonal entries (empty if the
        requested diagonal lies outside the matrix).
    """
    rows, cols = self.shape
    # Length of the k-th diagonal; non-positive means it is empty.
    ylen = min(rows + min(k, 0), cols - max(k, 0))
    if ylen <= 0:
        return cupy.empty(0, dtype=self.dtype)
    self.sum_duplicates()
    y = cupy.empty(ylen, dtype=self.dtype)
    _cupy_csr_diagonal()(k, rows, cols, self.data, self.indptr,
                         self.indices, y)
    return y
def eliminate_zeros(self):
    """Removes explicitly stored zero entries in place."""
    from cupyx import cusparse
    # csr2csr_compress keeps only entries whose magnitude exceeds the
    # tolerance (0 here), i.e. drops explicit zeros.
    compress = cusparse.csr2csr_compress(self, 0)
    self.data = compress.data
    self.indices = compress.indices
    self.indptr = compress.indptr
def _maximum_minimum(self, other, cupy_op, op_name, dense_check):
    """Shared implementation of :meth:`maximum` and :meth:`minimum`.

    Args:
        other: scalar, dense array, or CSR matrix.
        cupy_op: ``cupy.maximum`` or ``cupy.minimum``.
        op_name (str): '_maximum_' or '_minimum_' for ``binopt_csr``.
        dense_check: predicate on the scalar; when True the result is
            dense (all implicit zeros would be replaced by the scalar).

    Returns:
        csr_matrix or dense array.
    """
    if _util.isscalarlike(other):
        other = cupy.asarray(other, dtype=self.dtype)
        if dense_check(other):
            dtype = self.dtype
            # Note: This is a work-around to make the output dtype the same
            # as SciPy. It might be SciPy version dependent.
            if dtype == numpy.float32:
                dtype = numpy.float64
            elif dtype == numpy.complex64:
                dtype = numpy.complex128
            dtype = cupy.result_type(dtype, other)
            other = other.astype(dtype, copy=False)
            # Note: The computation steps below are different from SciPy.
            new_array = cupy_op(self.todense(), other)
            return csr_matrix(new_array)
        else:
            # The scalar cannot beat the implicit zeros, so sparsity is
            # preserved: apply the op to stored data only.
            self.sum_duplicates()
            new_data = cupy_op(self.data, other)
            return csr_matrix((new_data, self.indices, self.indptr),
                              shape=self.shape, dtype=self.dtype)
    elif _util.isdense(other):
        self.sum_duplicates()
        other = cupy.atleast_2d(other)
        return cupy_op(self.todense(), other)
    elif isspmatrix_csr(other):
        self.sum_duplicates()
        other.sum_duplicates()
        return binopt_csr(self, other, op_name)
    raise NotImplementedError
def maximum(self, other):
    """Element-wise maximum with ``other`` (scalar, dense or CSR)."""
    # A positive scalar would replace every implicit zero -> dense result.
    return self._maximum_minimum(other, cupy.maximum, '_maximum_',
                                 lambda x: x > 0)

def minimum(self, other):
    """Element-wise minimum with ``other`` (scalar, dense or CSR)."""
    # A negative scalar would replace every implicit zero -> dense result.
    return self._maximum_minimum(other, cupy.minimum, '_minimum_',
                                 lambda x: x < 0)
def multiply(self, other):
    """Point-wise multiplication by another matrix, vector or scalar.

    Args:
        other: scalar, dense matrix/vector, or CSR matrix.

    Returns:
        csr_matrix: element-wise product (broadcasting along axes of
        length 1 is supported by the helpers).

    Raises:
        TypeError: if ``other`` is none of the supported kinds.
    """
    if cupy.isscalar(other):
        return multiply_by_scalar(self, other)
    elif _util.isdense(other):
        self.sum_duplicates()
        other = cupy.atleast_2d(other)
        return multiply_by_dense(self, other)
    elif isspmatrix_csr(other):
        self.sum_duplicates()
        other.sum_duplicates()
        return multiply_by_csr(self, other)
    else:
        msg = 'expected scalar, dense matrix/vector or csr matrix'
        raise TypeError(msg)
# TODO(unno): Implement prune
def setdiag(self, values, k=0):
    """Set diagonal or off-diagonal elements of the array.

    Args:
        values: array of values to place on the diagonal; a 0-d value is
            broadcast along the whole diagonal.
        k (int): diagonal offset (0 = main, > 0 above, < 0 below).

    Raises:
        ValueError: if ``k`` lies outside the matrix.
    """
    rows, cols = self.shape
    row_st, col_st = max(0, -k), max(0, k)
    x_len = min(rows - row_st, cols - col_st)
    if x_len <= 0:
        raise ValueError('k exceeds matrix dimensions')
    values = values.astype(self.dtype)
    if values.ndim == 0:
        # broadcast
        x_data = cupy.full((x_len,), values, dtype=self.dtype)
    else:
        # Shorter ``values`` only overwrites a prefix of the diagonal.
        x_len = min(x_len, values.size)
        x_data = values[:x_len]
    # Build a sparse matrix holding the *delta* on the diagonal ...
    x_indices = cupy.arange(col_st, col_st + x_len, dtype='i')
    x_indptr = cupy.zeros((rows + 1,), dtype='i')
    x_indptr[row_st:row_st+x_len+1] = cupy.arange(x_len+1, dtype='i')
    x_indptr[row_st+x_len+1:] = x_len
    # ... new value minus the current diagonal, so adding it performs
    # the in-place update.
    x_data -= self.diagonal(k=k)[:x_len]
    y = self + csr_matrix((x_data, x_indices, x_indptr), shape=self.shape)
    self.data = y.data
    self.indices = y.indices
    self.indptr = y.indptr
def sort_indices(self):
    """Sorts the indices of this matrix *in place*.

    .. warning::
        Calling this function might synchronize the device.
    """
    from cupyx import cusparse
    if not self.has_sorted_indices:
        cusparse.csrsort(self)
        self.has_sorted_indices = True
def toarray(self, order=None, out=None):
    """Returns a dense matrix representing the same value.

    Args:
        order ({'C', 'F', None}): Whether to store data in C (row-major)
            order or F (column-major) order. Default is C-order.
        out: Not supported.

    Returns:
        cupy.ndarray: Dense array representing the same matrix.

    .. seealso:: :meth:`scipy.sparse.csr_matrix.toarray`
    """
    from cupyx import cusparse
    order = 'C' if order is None else order.upper()
    if self.nnz == 0:
        return cupy.zeros(shape=self.shape, dtype=self.dtype, order=order)
    if self.dtype.char not in 'fdFD':
        # cuSPARSE only handles real/complex float types; fall back to
        # the custom kernel for other dtypes.
        return csr2dense(self, order)
    x = self.copy()
    x.has_canonical_format = False  # need to enforce sum_duplicates
    x.sum_duplicates()
    if (cusparse.check_availability('sparseToDense')
            and (not runtime.is_hip or (x.nnz > 0))):
        # On HIP, nnz=0 is problematic as of ROCm 4.2.0
        y = cusparse.sparseToDense(x)
        if order == 'F':
            return y
        elif order == 'C':
            return cupy.ascontiguousarray(y)
        else:
            raise ValueError('order not understood')
    else:
        # csr2dense returns F-contiguous array.
        if order == 'C':
            # To return C-contiguous array, it uses transpose.
            return cusparse.csc2dense(x.T).T
        elif order == 'F':
            return cusparse.csr2dense(x)
        else:
            raise ValueError('order not understood')
def tobsr(self, blocksize=None, copy=False):
    """Conversion to BSR format is not implemented yet."""
    # TODO(unno): Implement tobsr
    raise NotImplementedError
def tocoo(self, copy=False):
    """Converts the matrix to COOdinate format.

    Args:
        copy (bool): If ``False``, it shares data arrays as much as
            possible.

    Returns:
        cupyx.scipy.sparse.coo_matrix: Converted matrix.
    """
    from cupyx import cusparse
    if copy:
        data = self.data.copy()
        indices = self.indices.copy()
    else:
        data = self.data
        indices = self.indices
    # Only indptr needs expansion; data/indices layouts are identical.
    return cusparse.csr2coo(self, data, indices)
def tocsc(self, copy=False):
    """Converts the matrix to Compressed Sparse Column format.

    Args:
        copy (bool): If ``False``, it shares data arrays as much as
            possible. Actually this option is ignored because all
            arrays in a matrix cannot be shared in csr to csc conversion.

    Returns:
        cupyx.scipy.sparse.csc_matrix: Converted matrix.
    """
    from cupyx import cusparse
    # copy is ignored
    if cusparse.check_availability('csr2csc'):
        csr2csc = cusparse.csr2csc
    elif cusparse.check_availability('csr2cscEx2'):
        csr2csc = cusparse.csr2cscEx2
    else:
        raise NotImplementedError
    # don't touch has_sorted_indices, as cuSPARSE made no guarantee
    return csr2csc(self)
def tocsr(self, copy=False):
    """Converts the matrix to Compressed Sparse Row format.

    Args:
        copy (bool): If ``False``, the method returns itself.
            Otherwise it makes a copy of the matrix.

    Returns:
        cupyx.scipy.sparse.csr_matrix: Converted matrix.
    """
    return self.copy() if copy else self
def _tocsx(self):
    """Inverts the format.

    For a CSR matrix the "other" compressed format is CSC.
    """
    return self.tocsc()
def todia(self, copy=False):
    """Conversion to DIA format is not implemented yet."""
    # TODO(unno): Implement todia
    raise NotImplementedError

def todok(self, copy=False):
    """Conversion to DOK format is not implemented yet."""
    # TODO(unno): Implement todok
    raise NotImplementedError

def tolil(self, copy=False):
    """Conversion to LIL format is not implemented yet."""
    # TODO(unno): Implement tolil
    raise NotImplementedError
def transpose(self, axes=None, copy=False):
    """Returns a transpose matrix.

    Args:
        axes: This option is not supported.
        copy (bool): If ``True``, a returned matrix shares no data.
            Otherwise, it shared data arrays as much as possible.

    Returns:
        cupyx.scipy.sparse.csc_matrix: `self` with the dimensions reversed.
    """
    if axes is not None:
        raise ValueError(
            'Sparse matrices do not support an \'axes\' parameter because '
            'swapping dimensions is the only logical permutation.')
    # Reinterpreting the CSR arrays as CSC of the swapped shape is a
    # zero-cost transpose.
    shape = self.shape[1], self.shape[0]
    trans = _csc.csc_matrix(
        (self.data, self.indices, self.indptr), shape=shape, copy=copy)
    trans.has_canonical_format = self.has_canonical_format
    return trans
def getrow(self, i):
    """Returns a copy of row i of the matrix, as a (1 x n)
    CSR matrix (row vector).

    Args:
        i (integer): Row

    Returns:
        cupyx.scipy.sparse.csr_matrix: Sparse matrix with single row
    """
    # Rows are the major axis of CSR.
    return self._major_slice(slice(i, i + 1), copy=True)

def getcol(self, i):
    """Returns a copy of column i of the matrix, as a (m x 1)
    CSR matrix (column vector).

    Args:
        i (integer): Column

    Returns:
        cupyx.scipy.sparse.csr_matrix: Sparse matrix with single column
    """
    # Columns are the minor axis of CSR.
    return self._minor_slice(slice(i, i + 1), copy=True)
# Indexing helpers used by __getitem__: each handles one combination of
# (int | slice | array) row and column selectors. Rows are the major
# axis of CSR, columns the minor axis.
def _get_intXarray(self, row, col):
    """Select one row (as a length-1 slice), then fancy-index columns."""
    row = slice(row, row + 1)
    return self._major_slice(row)._minor_index_fancy(col)

def _get_intXslice(self, row, col):
    """Select one row, then slice columns."""
    row = slice(row, row + 1)
    return self._major_slice(row)._minor_slice(col, copy=True)

def _get_sliceXint(self, row, col):
    """Slice rows, then select one column."""
    col = slice(col, col + 1)
    # Contiguous row slices share data, so the column slice must copy.
    copy = row.step in (1, None)
    return self._major_slice(row)._minor_slice(col, copy=copy)

def _get_sliceXarray(self, row, col):
    """Slice rows, then fancy-index columns."""
    return self._major_slice(row)._minor_index_fancy(col)

def _get_arrayXint(self, row, col):
    """Fancy-index rows, then select one column."""
    col = slice(col, col + 1)
    return self._major_index_fancy(row)._minor_slice(col)

def _get_arrayXslice(self, row, col):
    """Fancy-index rows, then slice columns."""
    if col.step not in (1, None):
        # Strided column slices are handled as a fancy-index of the
        # materialized column positions.
        start, stop, step = col.indices(self.shape[1])
        cols = cupy.arange(start, stop, step, self.indices.dtype)
        return self._get_arrayXarray(row, cols)
    return self._major_index_fancy(row)._minor_slice(col)
def isspmatrix_csr(x):
    """Checks if a given matrix is of CSR format.

    Args:
        x: object to check.

    Returns:
        bool: ``True`` iff ``x`` is a
        :class:`cupyx.scipy.sparse.csr_matrix`.
    """
    return isinstance(x, csr_matrix)
def check_shape_for_pointwise_op(a_shape, b_shape, allow_broadcasting=True):
    """Validate that two 2-D shapes are compatible for a point-wise op.

    Args:
        a_shape (tuple): shape ``(rows, cols)`` of the first operand.
        b_shape (tuple): shape of the second operand.
        allow_broadcasting (bool): when ``True``, an axis of length 1 in
            either operand is considered compatible with any length.

    Raises:
        ValueError: if the shapes are incompatible.
    """
    if not allow_broadcasting:
        if a_shape != b_shape:
            raise ValueError('inconsistent shape')
        return
    (a_m, a_n), (b_m, b_n) = a_shape, b_shape
    rows_ok = a_m == b_m or 1 in (a_m, b_m)
    cols_ok = a_n == b_n or 1 in (a_n, b_n)
    if not (rows_ok and cols_ok):
        raise ValueError('inconsistent shape')
def multiply_by_scalar(sp, a):
    """Return ``sp * a`` for a scalar ``a`` as a new CSR matrix.

    The index arrays are copied so the result shares no storage with
    ``sp``.
    """
    scaled = sp.data * a
    return csr_matrix(
        (scaled, sp.indices.copy(), sp.indptr.copy()), shape=sp.shape)
def multiply_by_dense(sp, dn):
    """Element-wise product of CSR matrix ``sp`` and dense 2-D ``dn``.

    Axes of length 1 in either operand are broadcast; the result is a
    CSR matrix of the broadcast shape ``(m, n)``.
    """
    check_shape_for_pointwise_op(sp.shape, dn.shape)
    sp_m, sp_n = sp.shape
    dn_m, dn_n = dn.shape
    m, n = max(sp_m, dn_m), max(sp_n, dn_n)
    # nnz of ``sp`` virtually broadcast to the output shape.
    nnz = sp.nnz * (m // sp_m) * (n // sp_n)
    dtype = numpy.promote_types(sp.dtype, dn.dtype)
    data = cupy.empty(nnz, dtype=dtype)
    indices = cupy.empty(nnz, dtype=sp.indices.dtype)
    # Build the broadcast indptr without materializing the broadcast
    # matrix itself.
    if m > sp_m:
        if n > sp_n:
            # single stored element repeated over a full (m, n) grid
            indptr = cupy.arange(0, nnz+1, n, dtype=sp.indptr.dtype)
        else:
            # one stored row repeated m times
            indptr = cupy.arange(0, nnz+1, sp.nnz, dtype=sp.indptr.dtype)
    else:
        indptr = sp.indptr.copy()
        if n > sp_n:
            # one stored column repeated n times per row
            indptr *= n
    # out = sp * dn
    cupy_multiply_by_dense()(sp.data, sp.indptr, sp.indices, sp_m, sp_n,
                             dn, dn_m, dn_n, indptr, m, n, data, indices)
    return csr_matrix((data, indices, indptr), shape=(m, n))
# CUDA device helper: binary-search ``indptr`` to find the row that owns
# the i-th stored element of a CSR matrix.
_GET_ROW_ID_ = '''
__device__ inline int get_row_id(int i, int min, int max, const int *indptr) {
int row = (min + max) / 2;
while (min < max) {
if (i < indptr[row]) {
max = row - 1;
} else if (i >= indptr[row + 1]) {
min = row + 1;
} else {
break;
}
row = (min + max) / 2;
}
return row;
}
'''
# CUDA device helper: binary-search a (sorted) row of ``indices`` for a
# given column; returns the data index or -1 when absent.
_FIND_INDEX_HOLDING_COL_IN_ROW_ = '''
__device__ inline int find_index_holding_col_in_row(
int row, int col, const int *indptr, const int *indices) {
int j_min = indptr[row];
int j_max = indptr[row+1] - 1;
while (j_min <= j_max) {
int j = (j_min + j_max) / 2;
int j_col = indices[j];
if (j_col == col) {
return j;
} else if (j_col < col) {
j_min = j + 1;
} else {
j_max = j - 1;
}
}
return -1;
}
'''
@cupy._util.memoize(for_each_device=True)
def cupy_multiply_by_dense():
    """Kernel for ``multiply_by_dense``: one thread per output non-zero.

    Each thread maps its output position back to the (possibly
    broadcast) source element of the sparse operand and the matching
    dense element, then writes the product and the output column index.
    """
    return cupy.ElementwiseKernel(
        '''
raw S SP_DATA, raw I SP_INDPTR, raw I SP_INDICES,
int32 SP_M, int32 SP_N,
raw D DN_DATA, int32 DN_M, int32 DN_N,
raw I OUT_INDPTR, int32 OUT_M, int32 OUT_N
''',
        'O OUT_DATA, I OUT_INDICES',
        '''
int i_out = i;
int m_out = get_row_id(i_out, 0, OUT_M - 1, &(OUT_INDPTR[0]));
int i_sp = i_out;
if (OUT_M > SP_M && SP_M == 1) {
i_sp -= OUT_INDPTR[m_out];
}
if (OUT_N > SP_N && SP_N == 1) {
i_sp /= OUT_N;
}
int n_out = SP_INDICES[i_sp];
if (OUT_N > SP_N && SP_N == 1) {
n_out = i_out - OUT_INDPTR[m_out];
}
int m_dn = m_out;
if (OUT_M > DN_M && DN_M == 1) {
m_dn = 0;
}
int n_dn = n_out;
if (OUT_N > DN_N && DN_N == 1) {
n_dn = 0;
}
OUT_DATA = (O)(SP_DATA[i_sp] * DN_DATA[n_dn + (DN_N * m_dn)]);
OUT_INDICES = n_out;
''',
        'cupyx_scipy_sparse_csr_multiply_by_dense',
        preamble=_GET_ROW_ID_
    )
@cupy._util.memoize(for_each_device=True)
def _cupy_divide_by_dense():
    """Kernel dividing COO-stored entries by the matching dense element.

    ``row``/``col`` identify each stored entry; ``width`` is the number
    of columns of the (row-major) dense divisor.
    """
    return cupy.ElementwiseKernel(
        'T data, I row, I col, I width, raw T other',
        'T res',
        '''
res = data / other[row * width + col]
''',
        'cupyx_scipy_sparse_coo_divide_dense',
    )
def multiply_by_csr(a, b):
    """Element-wise product of two CSR matrices (with broadcasting).

    Two GPU passes: pass 1 computes products where ``a`` has a stored
    entry and flags which of them are non-zero in ``b`` as well; pass 2
    compacts the flagged entries into the result arrays.
    """
    check_shape_for_pointwise_op(a.shape, b.shape)
    a_m, a_n = a.shape
    b_m, b_n = b.shape
    m, n = max(a_m, b_m), max(a_n, b_n)
    # nnz counts after virtual broadcasting to the output shape.
    a_nnz = a.nnz * (m // a_m) * (n // a_n)
    b_nnz = b.nnz * (m // b_m) * (n // b_n)
    if a_nnz > b_nnz:
        # Iterate over the sparser operand; the product is symmetric.
        return multiply_by_csr(b, a)
    c_nnz = a_nnz
    dtype = numpy.promote_types(a.dtype, b.dtype)
    c_data = cupy.empty(c_nnz, dtype=dtype)
    c_indices = cupy.empty(c_nnz, dtype=a.indices.dtype)
    if m > a_m:
        if n > a_n:
            c_indptr = cupy.arange(0, c_nnz+1, n, dtype=a.indptr.dtype)
        else:
            c_indptr = cupy.arange(0, c_nnz+1, a.nnz, dtype=a.indptr.dtype)
    else:
        c_indptr = a.indptr.copy()
        if n > a_n:
            c_indptr *= n
    flags = cupy.zeros(c_nnz+1, dtype=a.indices.dtype)
    nnz_each_row = cupy.zeros(m+1, dtype=a.indptr.dtype)
    # compute c = a * b where necessary and get sparsity pattern of matrix d
    cupy_multiply_by_csr_step1()(
        a.data, a.indptr, a.indices, a_m, a_n,
        b.data, b.indptr, b.indices, b_m, b_n,
        c_indptr, m, n, c_data, c_indices, flags, nnz_each_row)
    # Prefix sums turn the per-entry flags / per-row counts into
    # destination offsets for the compaction pass.
    flags = cupy.cumsum(flags, dtype=a.indptr.dtype)
    d_indptr = cupy.cumsum(nnz_each_row, dtype=a.indptr.dtype)
    d_nnz = int(d_indptr[-1])
    d_data = cupy.empty(d_nnz, dtype=dtype)
    d_indices = cupy.empty(d_nnz, dtype=a.indices.dtype)
    # remove zero elements in matrix c
    cupy_multiply_by_csr_step2()(c_data, c_indices, flags, d_data, d_indices)
    return csr_matrix((d_data, d_indices, d_indptr), shape=(m, n))
@cupy._util.memoize(for_each_device=True)
def cupy_multiply_by_csr_step1():
    """Kernel for pass 1 of ``multiply_by_csr``.

    One thread per (broadcast) stored entry of A: multiply by the
    matching entry of B if it exists, count per-row results
    (NNZ_EACH_ROW) and flag each hit (FLAGS) for later compaction.
    """
    return cupy.ElementwiseKernel(
        '''
raw A A_DATA, raw I A_INDPTR, raw I A_INDICES, int32 A_M, int32 A_N,
raw B B_DATA, raw I B_INDPTR, raw I B_INDICES, int32 B_M, int32 B_N,
raw I C_INDPTR, int32 C_M, int32 C_N
''',
        'C C_DATA, I C_INDICES, raw I FLAGS, raw I NNZ_EACH_ROW',
        '''
int i_c = i;
int m_c = get_row_id(i_c, 0, C_M - 1, &(C_INDPTR[0]));
int i_a = i;
if (C_M > A_M && A_M == 1) {
i_a -= C_INDPTR[m_c];
}
if (C_N > A_N && A_N == 1) {
i_a /= C_N;
}
int n_c = A_INDICES[i_a];
if (C_N > A_N && A_N == 1) {
n_c = i % C_N;
}
int m_b = m_c;
if (C_M > B_M && B_M == 1) {
m_b = 0;
}
int n_b = n_c;
if (C_N > B_N && B_N == 1) {
n_b = 0;
}
int i_b = find_index_holding_col_in_row(m_b, n_b,
&(B_INDPTR[0]), &(B_INDICES[0]));
if (i_b >= 0) {
atomicAdd(&(NNZ_EACH_ROW[m_c+1]), 1);
FLAGS[i+1] = 1;
C_DATA = (C)(A_DATA[i_a] * B_DATA[i_b]);
C_INDICES = n_c;
}
''',
        'cupyx_scipy_sparse_csr_multiply_by_csr_step1',
        preamble=_GET_ROW_ID_ + _FIND_INDEX_HOLDING_COL_IN_ROW_
    )
@cupy._util.memoize(for_each_device=True)
def cupy_multiply_by_csr_step2():
    """Kernel for pass 2 of ``multiply_by_csr``: compaction.

    FLAGS holds the prefix sum of hit flags, so ``FLAGS[i]`` is the
    destination index of entry ``i`` when ``FLAGS[i] < FLAGS[i+1]``
    (i.e. the entry was flagged in pass 1).
    """
    return cupy.ElementwiseKernel(
        'T C_DATA, I C_INDICES, raw I FLAGS',
        'raw D D_DATA, raw I D_INDICES',
        '''
int j = FLAGS[i];
if (j < FLAGS[i+1]) {
D_DATA[j] = (D)(C_DATA);
D_INDICES[j] = C_INDICES;
}
''',
        'cupyx_scipy_sparse_csr_multiply_by_csr_step2'
    )
# CUDA device snippets defining ``binopt(in1, in2)`` for each supported
# element-wise operation. ``binopt_csr`` prepends exactly one of these
# to its kernel preamble depending on ``op_name``.
_BINOPT_MAX_ = '''
__device__ inline O binopt(T in1, T in2) {
return max(in1, in2);
}
'''
_BINOPT_MIN_ = '''
__device__ inline O binopt(T in1, T in2) {
return min(in1, in2);
}
'''
_BINOPT_EQ_ = '''
__device__ inline O binopt(T in1, T in2) {
return (in1 == in2);
}
'''
_BINOPT_NE_ = '''
__device__ inline O binopt(T in1, T in2) {
return (in1 != in2);
}
'''
_BINOPT_LT_ = '''
__device__ inline O binopt(T in1, T in2) {
return (in1 < in2);
}
'''
_BINOPT_GT_ = '''
__device__ inline O binopt(T in1, T in2) {
return (in1 > in2);
}
'''
_BINOPT_LE_ = '''
__device__ inline O binopt(T in1, T in2) {
return (in1 <= in2);
}
'''
_BINOPT_GE_ = '''
__device__ inline O binopt(T in1, T in2) {
return (in1 >= in2);
}
'''
def binopt_csr(a, b, op_name):
    """Element-wise binary operation between two CSR matrices.

    Args:
        a (csr_matrix): first operand.
        b (csr_matrix): second operand; axes of length 1 in either
            operand are broadcast.
        op_name (str): one of '_maximum_', '_minimum_', '_eq_', '_ne_',
            '_lt_', '_gt_', '_le_', '_ge_'.

    Returns:
        csr_matrix: result of applying the operation element-wise, with
        entries that evaluate to zero dropped.

    Raises:
        ValueError: if ``op_name`` is not one of the supported names.
    """
    check_shape_for_pointwise_op(a.shape, b.shape)
    a_m, a_n = a.shape
    b_m, b_n = b.shape
    m, n = max(a_m, b_m), max(a_n, b_n)
    # nnz of each operand after virtual broadcasting to (m, n).
    a_nnz = a.nnz * (m // a_m) * (n // a_n)
    b_nnz = b.nnz * (m // b_m) * (n // b_n)
    a_info = cupy.zeros(a_nnz + 1, dtype=a.indices.dtype)
    b_info = cupy.zeros(b_nnz + 1, dtype=b.indices.dtype)
    a_valid = cupy.zeros(a_nnz, dtype=numpy.int8)
    b_valid = cupy.zeros(b_nnz, dtype=numpy.int8)
    c_indptr = cupy.zeros(m + 1, dtype=a.indptr.dtype)
    in_dtype = numpy.promote_types(a.dtype, b.dtype)
    a_data = a.data.astype(in_dtype, copy=False)
    b_data = b.data.astype(in_dtype, copy=False)
    # Map op_name to its device ``binopt`` definition and output dtype:
    # comparisons yield boolean matrices, max/min keep the input dtype.
    op_table = {
        '_maximum_': (_BINOPT_MAX_, in_dtype),
        '_minimum_': (_BINOPT_MIN_, in_dtype),
        '_eq_': (_BINOPT_EQ_, numpy.bool_),
        '_ne_': (_BINOPT_NE_, numpy.bool_),
        '_lt_': (_BINOPT_LT_, numpy.bool_),
        '_gt_': (_BINOPT_GT_, numpy.bool_),
        '_le_': (_BINOPT_LE_, numpy.bool_),
        '_ge_': (_BINOPT_GE_, numpy.bool_),
    }
    if op_name not in op_table:
        raise ValueError('invalid op_name: {}'.format(op_name))
    binopt_src, out_dtype = op_table[op_name]
    funcs = _GET_ROW_ID_ + binopt_src
    a_tmp_data = cupy.empty(a_nnz, dtype=out_dtype)
    b_tmp_data = cupy.empty(b_nnz, dtype=out_dtype)
    a_tmp_indices = cupy.empty(a_nnz, dtype=a.indices.dtype)
    b_tmp_indices = cupy.empty(b_nnz, dtype=b.indices.dtype)
    _size = a_nnz + b_nnz
    # Pass 1: evaluate the op per stored entry, record non-zero results
    # and per-row / per-entry counts.
    cupy_binopt_csr_step1(op_name, preamble=funcs)(
        m, n,
        a.indptr, a.indices, a_data, a_m, a_n, a.nnz, a_nnz,
        b.indptr, b.indices, b_data, b_m, b_n, b.nnz, b_nnz,
        a_info, a_valid, a_tmp_indices, a_tmp_data,
        b_info, b_valid, b_tmp_indices, b_tmp_data,
        c_indptr, size=_size)
    # Prefix sums turn the counts into destination offsets.
    a_info = cupy.cumsum(a_info, dtype=a_info.dtype)
    b_info = cupy.cumsum(b_info, dtype=b_info.dtype)
    c_indptr = cupy.cumsum(c_indptr, dtype=c_indptr.dtype)
    c_nnz = int(c_indptr[-1])
    c_indices = cupy.empty(c_nnz, dtype=a.indices.dtype)
    c_data = cupy.empty(c_nnz, dtype=out_dtype)
    # Pass 2: scatter the valid entries into the output arrays.
    cupy_binopt_csr_step2(op_name)(
        a_info, a_valid, a_tmp_indices, a_tmp_data, a_nnz,
        b_info, b_valid, b_tmp_indices, b_tmp_data, b_nnz,
        c_indices, c_data, size=_size)
    return csr_matrix((c_data, c_indices, c_indptr), shape=(m, n))
@cupy._util.memoize(for_each_device=True)
def cupy_binopt_csr_step1(op_name, preamble=''):
    """Kernel factory for pass 1 of ``binopt_csr``.

    One thread per (broadcast) stored entry of A and of B. Each thread
    evaluates ``binopt`` for its element against the matching element
    of the other operand, writes non-zero results to the temporary
    arrays, and counts them per row (C_INFO) and per entry
    (A_INFO/B_INFO) for the later cumsum/compaction pass. ``preamble``
    must provide ``get_row_id`` and the op-specific ``binopt``.
    """
    name = 'cupyx_scipy_sparse_csr_binopt_' + op_name + 'step1'
    return cupy.ElementwiseKernel(
        '''
int32 M, int32 N,
raw I A_INDPTR, raw I A_INDICES, raw T A_DATA,
int32 A_M, int32 A_N, int32 A_NNZ_ACT, int32 A_NNZ,
raw I B_INDPTR, raw I B_INDICES, raw T B_DATA,
int32 B_M, int32 B_N, int32 B_NNZ_ACT, int32 B_NNZ
''',
        '''
raw I A_INFO, raw B A_VALID, raw I A_TMP_INDICES, raw O A_TMP_DATA,
raw I B_INFO, raw B B_VALID, raw I B_TMP_INDICES, raw O B_TMP_DATA,
raw I C_INFO
''',
        '''
if (i >= A_NNZ + B_NNZ) return;
const int *MY_INDPTR, *MY_INDICES; int *MY_INFO; const T *MY_DATA;
const int *OP_INDPTR, *OP_INDICES; int *OP_INFO; const T *OP_DATA;
int MY_M, MY_N, MY_NNZ_ACT, MY_NNZ;
int OP_M, OP_N, OP_NNZ_ACT, OP_NNZ;
signed char *MY_VALID; I *MY_TMP_INDICES; O *MY_TMP_DATA;
int my_j;
if (i < A_NNZ) {
// in charge of one of non-zero element of sparse matrix A
my_j = i;
MY_INDPTR = &(A_INDPTR[0]); OP_INDPTR = &(B_INDPTR[0]);
MY_INDICES = &(A_INDICES[0]); OP_INDICES = &(B_INDICES[0]);
MY_INFO = &(A_INFO[0]); OP_INFO = &(B_INFO[0]);
MY_DATA = &(A_DATA[0]); OP_DATA = &(B_DATA[0]);
MY_M = A_M; OP_M = B_M;
MY_N = A_N; OP_N = B_N;
MY_NNZ_ACT = A_NNZ_ACT; OP_NNZ_ACT = B_NNZ_ACT;
MY_NNZ = A_NNZ; OP_NNZ = B_NNZ;
MY_VALID = &(A_VALID[0]);
MY_TMP_DATA= &(A_TMP_DATA[0]);
MY_TMP_INDICES = &(A_TMP_INDICES[0]);
} else {
// in charge of one of non-zero element of sparse matrix B
my_j = i - A_NNZ;
MY_INDPTR = &(B_INDPTR[0]); OP_INDPTR = &(A_INDPTR[0]);
MY_INDICES = &(B_INDICES[0]); OP_INDICES = &(A_INDICES[0]);
MY_INFO = &(B_INFO[0]); OP_INFO = &(A_INFO[0]);
MY_DATA = &(B_DATA[0]); OP_DATA = &(A_DATA[0]);
MY_M = B_M; OP_M = A_M;
MY_N = B_N; OP_N = A_N;
MY_NNZ_ACT = B_NNZ_ACT; OP_NNZ_ACT = A_NNZ_ACT;
MY_NNZ = B_NNZ; OP_NNZ = A_NNZ;
MY_VALID = &(B_VALID[0]);
MY_TMP_DATA= &(B_TMP_DATA[0]);
MY_TMP_INDICES = &(B_TMP_INDICES[0]);
}
int _min, _max, _mid;
// get column location
int my_col;
int my_j_act = my_j;
if (MY_M == 1 && MY_M < M) {
if (MY_N == 1 && MY_N < N) my_j_act = 0;
else my_j_act = my_j % MY_NNZ_ACT;
} else {
if (MY_N == 1 && MY_N < N) my_j_act = my_j / N;
}
my_col = MY_INDICES[my_j_act];
if (MY_N == 1 && MY_N < N) {
my_col = my_j % N;
}
// get row location
int my_row = get_row_id(my_j_act, 0, MY_M - 1, &(MY_INDPTR[0]));
if (MY_M == 1 && MY_M < M) {
if (MY_N == 1 && MY_N < N) my_row = my_j / N;
else my_row = my_j / MY_NNZ_ACT;
}
int op_row = my_row;
int op_row_act = op_row;
if (OP_M == 1 && OP_M < M) {
op_row_act = 0;
}
int op_col = 0;
_min = OP_INDPTR[op_row_act];
_max = OP_INDPTR[op_row_act + 1] - 1;
int op_j_act = _min;
bool op_nz = false;
if (_min <= _max) {
if (OP_N == 1 && OP_N < N) {
op_col = my_col;
op_nz = true;
}
else {
_mid = (_min + _max) / 2;
op_col = OP_INDICES[_mid];
while (_min < _max) {
if (op_col < my_col) {
_min = _mid + 1;
} else if (op_col > my_col) {
_max = _mid;
} else {
break;
}
_mid = (_min + _max) / 2;
op_col = OP_INDICES[_mid];
}
op_j_act = _mid;
if (op_col == my_col) {
op_nz = true;
} else if (op_col < my_col) {
op_col = N;
op_j_act += 1;
}
}
}
int op_j = op_j_act;
if (OP_M == 1 && OP_M < M) {
if (OP_N == 1 && OP_N < N) {
op_j = (op_col + N * op_row) * OP_NNZ_ACT;
} else {
op_j = op_j_act + OP_NNZ_ACT * op_row;
}
} else {
if (OP_N == 1 && OP_N < N) {
op_j = op_col + N * op_j_act;
}
}
if (i < A_NNZ || !op_nz) {
T my_data = MY_DATA[my_j_act];
T op_data = 0;
if (op_nz) op_data = OP_DATA[op_j_act];
O out;
if (i < A_NNZ) out = binopt(my_data, op_data);
else out = binopt(op_data, my_data);
if (out != static_cast<O>(0)) {
MY_VALID[my_j] = 1;
MY_TMP_DATA[my_j] = out;
MY_TMP_INDICES[my_j] = my_col;
atomicAdd( &(C_INFO[my_row + 1]), 1 );
atomicAdd( &(MY_INFO[my_j + 1]), 1 );
atomicAdd( &(OP_INFO[op_j]), 1 );
}
}
''',
        name, preamble=preamble,
    )
@cupy._util.memoize(for_each_device=True)
def cupy_binopt_csr_step2(op_name):
    """Kernel factory for pass 2 of ``binopt_csr``: compaction.

    After the prefix sum, A_INFO/B_INFO hold each entry's destination
    offset; valid entries are scattered into the output arrays.
    """
    name = 'cupyx_scipy_sparse_csr_binopt' + op_name + 'step2'
    return cupy.ElementwiseKernel(
        '''
raw I A_INFO, raw B A_VALID, raw I A_TMP_INDICES, raw O A_TMP_DATA,
int32 A_NNZ,
raw I B_INFO, raw B B_VALID, raw I B_TMP_INDICES, raw O B_TMP_DATA,
int32 B_NNZ
''',
        'raw I C_INDICES, raw O C_DATA',
        '''
if (i < A_NNZ) {
int j = i;
if (A_VALID[j]) {
C_INDICES[A_INFO[j]] = A_TMP_INDICES[j];
C_DATA[A_INFO[j]] = A_TMP_DATA[j];
}
} else if (i < A_NNZ + B_NNZ) {
int j = i - A_NNZ;
if (B_VALID[j]) {
C_INDICES[B_INFO[j]] = B_TMP_INDICES[j];
C_DATA[B_INFO[j]] = B_TMP_DATA[j];
}
}
''',
        name,
    )
def csr2dense(a, order):
    """Convert CSR matrix ``a`` to a dense array in the given order.

    Used for dtypes cuSPARSE does not handle; ``atomicAdd`` in the
    kernel accumulates duplicate entries.
    """
    out = cupy.zeros(a.shape, dtype=a.dtype, order=order)
    m, n = a.shape
    kern = _cupy_csr2dense(a.dtype)
    kern(m, n, a.indptr, a.indices, a.data, (order == 'C'), out)
    return out
@cupy._util.memoize(for_each_device=True)
def _cupy_csr2dense(dtype):
    """Kernel scattering CSR entries into a dense output array.

    Booleans cannot use ``atomicAdd``, so they are OR-ed by a plain
    store instead (duplicates collapse to ``true`` either way).
    """
    if dtype == '?':
        op = "if (DATA) OUT[index] = true;"
    else:
        op = "atomicAdd(&OUT[index], DATA);"
    return cupy.ElementwiseKernel(
        'int32 M, int32 N, raw I INDPTR, I INDICES, T DATA, bool C_ORDER',
        'raw T OUT',
        '''
int row = get_row_id(i, 0, M - 1, &(INDPTR[0]));
int col = INDICES;
int index = C_ORDER ? col + N * row : row + M * col;
''' + op,
        'cupyx_scipy_sparse_csr2dense',
        preamble=_GET_ROW_ID_
    )
def dense2csr(a):
    """Convert a dense 2-D array to a CSR matrix.

    Float/complex dtypes go through cuSPARSE; other dtypes use a
    two-pass custom kernel (count per row, then scatter).
    """
    from cupyx import cusparse
    if a.dtype.char in 'fdFD':
        if cusparse.check_availability('denseToSparse'):
            return cusparse.denseToSparse(a, format='csr')
        else:
            return cusparse.dense2csr(a)
    m, n = a.shape
    a = cupy.ascontiguousarray(a)
    indptr = cupy.zeros(m + 1, dtype=numpy.int32)
    info = cupy.zeros(m * n + 1, dtype=numpy.int32)
    # Pass 1: count non-zeros per row and flag each non-zero position.
    cupy_dense2csr_step1()(m, n, a, indptr, info)
    # Prefix sums give row pointers and per-entry destination offsets.
    indptr = cupy.cumsum(indptr, dtype=numpy.int32)
    info = cupy.cumsum(info, dtype=numpy.int32)
    nnz = int(indptr[-1])
    indices = cupy.empty(nnz, dtype=numpy.int32)
    data = cupy.empty(nnz, dtype=a.dtype)
    # Pass 2: scatter values and column indices into place.
    cupy_dense2csr_step2()(m, n, a, info, indices, data)
    return csr_matrix((data, indices, indptr), shape=(m, n))
@cupy._util.memoize(for_each_device=True)
def cupy_dense2csr_step1():
    """Kernel for pass 1 of ``dense2csr``: count and flag non-zeros."""
    return cupy.ElementwiseKernel(
        'int32 M, int32 N, T A',
        'raw I INDPTR, raw I INFO',
        '''
int row = i / N;
int col = i % N;
if (A != static_cast<T>(0)) {
atomicAdd( &(INDPTR[row + 1]), 1 );
INFO[i + 1] = 1;
}
''',
        'cupyx_scipy_sparse_dense2csr_step1')
@cupy._util.memoize(for_each_device=True)
def cupy_dense2csr_step2():
    """Kernel for pass 2 of ``dense2csr``: scatter values into CSR.

    ``INFO`` holds the prefix sum of non-zero flags, i.e. each
    element's destination index in the output arrays.
    """
    return cupy.ElementwiseKernel(
        'int32 M, int32 N, T A, raw I INFO',
        'raw I INDICES, raw T DATA',
        '''
int row = i / N;
int col = i % N;
if (A != static_cast<T>(0)) {
int idx = INFO[i];
INDICES[idx] = col;
DATA[idx] = A;
}
''',
        'cupyx_scipy_sparse_dense2csr_step2')
@cupy._util.memoize(for_each_device=True)
def _cupy_csr_diagonal():
    """Kernel extracting the k-th diagonal of a CSR matrix.

    One thread per diagonal element: locate (row, col) for offset k,
    binary-search the row for that column, and emit the stored value
    (or zero when absent).
    """
    return cupy.ElementwiseKernel(
        'int32 k, int32 rows, int32 cols, '
        'raw T data, raw I indptr, raw I indices',
        'T y',
        '''
int row = i;
int col = i;
if (k < 0) row -= k;
if (k > 0) col += k;
if (row >= rows || col >= cols) return;
int j = find_index_holding_col_in_row(row, col,
&(indptr[0]), &(indices[0]));
if (j >= 0) {
y = data[j];
} else {
y = static_cast<T>(0);
}
''',
        'cupyx_scipy_sparse_csr_diagonal',
        preamble=_FIND_INDEX_HOLDING_COL_IN_ROW_
    )
| cupy/cupy | cupyx/scipy/sparse/_csr.py | _csr.py | py | 42,419 | python | en | code | 7,341 | github-code | 36 | [
{
"api_name": "cupyx.scipy.sparse._compressed._compressed_sparse_matrix",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "cupyx.scipy.sparse._compressed",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "scipy.sparse.sparse.csr_matrix",
"line_number": ... |
73491948585 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from utils.models import BaseModel, models
from utils import constants as Constants, get_choices, get_kyc_upload_path
from django.contrib.contenttypes.models import ContentType
from django.db.models.signals import post_save
from django.utils.timezone import now
from django.dispatch import receiver
from django.core.cache import cache
class Lead(BaseModel):
user = models.ForeignKey('users.user', on_delete=models.CASCADE)
contact = models.ForeignKey(
'crm.Contact', null=True, blank=True, on_delete=models.CASCADE)
campaign = models.ForeignKey(
'users.Campaign', null=True, blank=True, on_delete=models.CASCADE)
pincode = models.CharField(max_length=6, null=True)
bookmark = models.BooleanField(default=False)
is_client = models.BooleanField(default=False)
ignore = models.BooleanField(default=False)
class Meta:
ordering = ('-bookmark',)
def create_opportunity(self, validated_data):
instance = Opportunity.objects.create(
lead_id=self.id, category_id=validated_data['category_id'])
instance.update_category_opportunity(validated_data)
return instance
def get_quotes(self):
from sales.models import Quote
return Quote.objects.filter(
opportunity__lead_id=self.id,
ignore=False).exclude(status='rejected')
def __str__(self):
return "%s - Contact: %s" % (
self.user.get_full_name(),
self.contact.first_name if self.contact else 'Pending')
class Opportunity(BaseModel):
    """A sales opportunity: one product category pursued for one lead.

    Each Opportunity has a category-specific companion record (resolved
    dynamically by lower-cased category name, e.g. the reverse accessor
    of a one-to-one model) that holds the category's own fields.
    """

    lead = models.ForeignKey('crm.Lead', on_delete=models.CASCADE)
    category = models.ForeignKey('product.Category', on_delete=models.CASCADE)
    # Scratch attribute, not a database field.
    details = None

    def __str__(self):
        return '%s: %s' % (self.category.name, self.lead.__str__())

    def __init__(self, *args, **kwargs):
        # BUGFIX: use the explicit class in super() rather than
        # super(self.__class__, ...), which recurses infinitely if this
        # model is ever subclassed.
        super(Opportunity, self).__init__(*args, **kwargs)
        if hasattr(self, 'category'):
            # e.g. "Health Insurance" -> "healthinsurance"; this is also the
            # attribute name of the category-specific companion record.
            self.category_name = self.category.name.replace(' ', '').lower()
            if hasattr(self, self.category_name):
                self.category_opportunity = getattr(self, self.category_name)

    def save(self, *args, **kw):
        # Invalidate the owner's cached contact list before persisting.
        cache.delete('USER_CONTACTS:%s' % self.lead.user_id)
        super(Opportunity, self).save(*args, **kw)

    def create_category_opportunity(self):
        # Instantiate the category-specific model, resolved at runtime via
        # the content-types framework (model name == category_name).
        ContentType.objects.get(
            model=self.category_name, app_label='crm'
        ).model_class().objects.create(opportunity_id=self.id)

    def calculate_suminsured(self):
        # Delegates to the category-specific record.
        self.category_opportunity.calculate_suminsured()

    def get_premiums(self, **kw):
        return self.category_opportunity.get_premiums()

    def refresh_quote_data(self, **kw):
        return self.category_opportunity.refresh_quote_data(**kw)

    def get_quotes(self):
        # Active quotes ordered by the category-specific base premium.
        return self.quote_set.filter(ignore=False).order_by(
            '%s__base_premium' % self.category_name)

    def get_recommendated_quotes(self):
        # Top five cheapest active quotes.
        return self.get_quotes()[:5]

    def update_fields(self, **kw):
        # Bulk attribute update followed by a single save().
        for field in kw.keys():
            setattr(self, field, kw[field])
        self.save()

    def update_category_opportunity(self, validated_data):
        """Copy recognised category fields from ``validated_data`` onto the
        category-opportunity record (lower-casing string values) and return it.

        Fields missing from the payload keep their current value.
        """
        self.refresh_from_db()
        category_opportunity = getattr(self, self.category_name)
        fields = dict.fromkeys(Constants.CATEGORY_OPPORTUNITY_FIELDS_MAPPER[
            self.category_name], None)
        for field in fields.keys():
            fields[field] = validated_data.get(field, getattr(
                category_opportunity, field))
            if isinstance(fields[field], str):
                fields[field] = fields[field].lower()
        category_opportunity.update_fields(**fields)
        return category_opportunity

    @property
    def city(self):
        from users.models import Pincode  # local import; likely avoids a cycle
        pincodes = Pincode.objects.filter(pincode=self.lead.pincode)
        if pincodes.exists():
            return pincodes.get().city
        # Implicitly returns None for an unknown pincode.

    @property
    def citytier(self):
        # Metro pricing tier for NCR / Mumbai-area pincodes, default otherwise.
        if self.lead.pincode in Constants.NCR_PINCODES or self.city in Constants.MUMBAI_AREA_TIER:  # noqa
            return Constants.MUMBAI_NCR_TIER
        return Constants.ALL_INDIA_TIER

    @property
    def companies_id(self):
        return self.lead.user.get_companies().values_list('id', flat=True)
class Contact(BaseModel):
    """A person attached to a lead: identity, demographics, uploaded KYC
    documents and phone-number normalisation helpers."""

    user = models.ForeignKey(
        'users.User', on_delete=models.CASCADE, null=True, blank=True)
    address = models.ForeignKey(
        'users.Address', null=True, blank=True, on_delete=models.CASCADE)
    gender = models.CharField(
        max_length=16, choices=get_choices(Constants.GENDER),
        null=True, blank=True)
    phone_no = models.CharField(max_length=40, null=True, blank=True)
    first_name = models.CharField(max_length=32, blank=True)
    middle_name = models.CharField(max_length=32, blank=True)
    last_name = models.CharField(max_length=32, blank=True)
    email = models.EmailField(max_length=64, null=True, blank=True)
    dob = models.DateField(null=True, blank=True)
    occupation = models.CharField(
        choices=get_choices(Constants.OCCUPATION_CHOICES), null=True,
        default=Constants.OCCUPATION_DEFAULT_CHOICE, blank=True, max_length=32)
    marital_status = models.CharField(
        choices=get_choices(
            Constants.MARITAL_STATUS), max_length=32, null=True, blank=True)
    annual_income = models.CharField(max_length=48, null=True, blank=True)

    def save(self, *args, **kwargs):
        # Names are stored lower-cased; get_full_name() re-titles them on read.
        self.first_name = self.first_name.lower()
        self.last_name = self.last_name.lower()
        # BUGFIX: explicit class in super() - super(self.__class__, ...)
        # recurses infinitely when the model is subclassed.
        super(Contact, self).save(*args, **kwargs)

    def __str__(self):
        full_name = self.get_full_name()
        return '%s - %s' % ((
            full_name if full_name else 'Parent'
        ), self.phone_no)

    def update_fields(self, **kw):
        # Bulk attribute update followed by a single save().
        for field in kw.keys():
            setattr(self, field, kw[field])
        self.save()

    def get_full_name(self):
        full_name = '%s %s %s' % (
            self.first_name, self.middle_name, self.last_name)
        return full_name.strip().title()

    def upload_docs(self, validated_data, fields):
        """Persist each uploaded file in ``fields`` as a ProposerDocument and
        replace the in-memory upload in ``validated_data`` with its stored URL."""
        from sales.models import ProposerDocument
        for field in fields:
            name = validated_data[field].name.split('.')
            file_name = '%s_%s_%s.%s' % (
                self.id, name[0], now().date().isoformat(), name[1])
            doc, created = ProposerDocument.objects.get_or_create(
                document_type=field, contact_id=self.id, ignore=False)
            doc.file.save(file_name, validated_data[field])
            validated_data[field] = doc.file.url
        return validated_data

    @property
    def calling_no(self):
        # Normalise to +91XXXXXXXXXX where the stored number allows it.
        if len(self.phone_no) == 10:
            return '+91%s' % self.phone_no
        if '+91' in self.phone_no and len(self.phone_no) == 13:
            return self.phone_no
        if '91' in self.phone_no[:2] and len(self.phone_no) > 10:
            return '+%s' % self.phone_no
        return self.phone_no

    @property
    def whatsapp_no(self):
        # Same normalisation rules as calling_no.
        if len(self.phone_no) == 10:
            return '+91%s' % self.phone_no
        if '+91' in self.phone_no and len(self.phone_no) == 13:
            return self.phone_no
        if '91' in self.phone_no[:2] and len(self.phone_no) > 10:
            # BUGFIX: was "'+' % self.phone_no", which raises TypeError
            # ("not all arguments converted"); prepend '+' as calling_no does.
            return '+%s' % self.phone_no
        return self.phone_no
class KYCDocument(BaseModel):
    # A single KYC document (types enumerated in Constants.KYC_DOC_TYPES),
    # optionally attached to a contact, with an optional scanned file.
    contact = models.ForeignKey(
        'crm.Contact', on_delete=models.CASCADE, null=True, blank=True)
    document_number = models.CharField(max_length=64)
    document_type = models.CharField(
        choices=get_choices(Constants.KYC_DOC_TYPES), max_length=16)
    file = models.FileField(
        upload_to=get_kyc_upload_path, null=True, blank=True)
@receiver(post_save, sender=Opportunity, dispatch_uid="action%s" % str(now()))
def opportunity_post_save(sender, instance, created, **kwargs):
    # On first save of an Opportunity, create its category-specific record.
    # NOTE(review): dispatch_uid embeds the import-time timestamp, so it is
    # only unique per process, not a stable identifier - confirm intent.
    if created:
        instance.create_category_opportunity()
| anjali-rao/backend-app-1 | crm/models.py | models.py | py | 8,059 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.models.BaseModel",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "utils.models.models.ForeignKey",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "utils.models.models",
"line_number": 15,
"usage_type": "name"
},
{
"api_nam... |
35420476348 | from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
path_gotowe = "kolos_1/obrazy/"
path = "kolos_1/"
def szary(w, h):
    """Build an h x w uint8 gradient image where pixel (i, j) = (i + 3*j) % 256."""
    rows = np.arange(h, dtype=np.int64).reshape(-1, 1)
    cols = np.arange(w, dtype=np.int64)
    return ((rows + 3 * cols) % 256).astype(np.uint8)
# Load the source image and build a synthetic grayscale channel of the
# same size using szary().
obraz7 = Image.open(path_gotowe+"obraz7.jpg")
#obraz7.show()
tab_szary = szary(obraz7.size[0], obraz7.size[1])
obraz_szary = Image.fromarray(tab_szary)
#obraz_szary.show()
# Replace each RGB channel in turn with the synthetic grayscale image.
kanaly = Image.Image.split(obraz7)
im_r = Image.merge("RGB", (obraz_szary, kanaly[1], kanaly[2]))
im_g = Image.merge("RGB", (kanaly[0], obraz_szary, kanaly[2]))
im_b = Image.merge("RGB", (kanaly[0], kanaly[1], obraz_szary))
# Show the original plus the three channel-swapped variants in a 2x2 grid
# and save the figure to <path>/mix.jpg.
obrazy = [obraz7, im_r, im_g, im_b]
figure = plt.figure(figsize=(16, 16))
for i in range(len(obrazy)):
    plt.subplot(2, 2, i+1)
    plt.imshow(obrazy[i])
plt.savefig(path+'mix.jpg')
plt.show()
plt.show()
| AdrianAlbrecht/WdGM | kolos_1/zad7.py | zad7.py | py | 910 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_num... |
import sys
sys.path.append('.')

from contextlib import contextmanager
import time
import torch
import graphnet as GNN

# Micro-benchmark: run num_iters forward passes of a 128x128 Linear layer
# over NN half-precision rows and print the achieved GFLOP/s.
num_iters = 100000
NN = 128*1024
D = 128
DT = torch.float16

dev = torch.device('cuda:0')
is_cuda = dev.type == 'cuda'

#net = GNN.Mlp(3 * D, [D, D, D], layernorm=False).to(DT).to(dev)
net = torch.nn.Linear(128, 128).to(DT).to(dev)
x = torch.randn(NN, D, device=dev, dtype=DT)

t0 = time.perf_counter()
for _ in range(num_iters):
    net(x)
t1 = time.perf_counter()

tt = t1 - t0
# FLOPs = rows * (128*128 weights) * 2 (multiply+add) per iteration.
# NOTE(review): CUDA kernels launch asynchronously; without a
# torch.cuda.synchronize() before reading the timer the measured time can
# undercount the GPU work - confirm before trusting the number.
print(NN * (128*128) * 2 * num_iters / tt / 1e9)
| medav/meshgraphnets-torch | test/test_torch_mlp.py | test_torch_mlp.py | py | 555 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "torch.float16",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"li... |
import openpyxl
from collections import Counter
from difflib import SequenceMatcher
from collections import OrderedDict
import time
import numpy
import sys

# For every row of the first sheet, scan the third sheet's location names:
# whenever a name occurs inside the base row's text (column 3), count it and
# accumulate its area (column 2). Totals are written back into columns 6/7.
path = "F:\\Book1.xlsx"

wb_obj = openpyxl.load_workbook(path)
sheet_base = wb_obj.worksheets[0]
sheet_area_1 = wb_obj.worksheets[1]
sheet_area_2 = wb_obj.worksheets[2]

start = time.time()
print("Start time = " + time.strftime("%H:%M:%S", time.gmtime(start)))
time.sleep(5)

# One-time deduplication step that built sheet_area_2 from sheet_area_1
# (kept commented out for reference):
# hArr = []
# aArr = []
# for x in range(2,sheet_area_1.max_row+1):
#     if sheet_area_1.cell(row=x,column=1).value not in hArr:
#         hArr.append(str(sheet_area_1.cell(row=x,column=1).value))
#         aArr.append(str(sheet_area_1.cell(row=x,column=2).value))
# nhArr = numpy.array(hArr)
# naArr = numpy.array(aArr)
# for i in range(0,len(nhArr)):
#     sheet_area_2.cell(row=i+2,column=1).value = nhArr[i]
#     sheet_area_2.cell(row=i+2,column=2).value = naArr[i]

for x in range(2, sheet_base.max_row+1):
    count = 0
    areaSum = 0
    # O(rows * names) substring scan; fine for small sheets.
    for i in range(2, sheet_area_2.max_row+1):
        if str(sheet_area_2.cell(row=i, column=1).value) in str(sheet_base.cell(row=x, column=3).value):
            count = count + 1
            areaSum = areaSum + float(sheet_area_2.cell(row=i, column=2).value)
    sheet_base.cell(row=x, column=6).value = areaSum
    sheet_base.cell(row=x, column=7).value = count

wb_obj.save(path)
end = time.time()
print("Run time = " + time.strftime("%H:%M:%S", time.gmtime(end-start))) | ChinhTheHugger/vscode_python | excel.py | excel.py | py | 1,448 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "openpyxl.load_workbook",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "time.gmtime",
"lin... |
19417305890 | import requests
class Player:
    """A single player record parsed from one API response dict."""

    def __init__(self, dict):
        # NOTE: the parameter name shadows the builtin `dict`; kept as-is
        # for interface compatibility with existing callers.
        self.name = dict['name']
        self.nationality = dict['nationality']
        self.team = dict['team']
        self.goals = dict['goals']
        self.assists = dict['assists']
        # Points = goals + assists, computed once at construction.
        self.points = self.goals + self.assists

    def __str__(self):
        scoring = f"{self.goals} + {self.assists} = {self.points}"
        return f"{self.name:20} {self.team} {scoring}"
class PlayerReader:
    # Fetches a JSON list of player dicts from `url` and wraps each entry
    # in a Player instance.
    def __init__(self, url):
        self.url = url

    def get_players(self):
        # Network call: issues one GET request per invocation.
        response = requests.get(self.url).json()
        players = []
        for player_dict in response:
            player = Player(player_dict)
            players.append(player)
        return players
class PlayerStats:
    """Statistics helpers over the players supplied by a reader object."""

    def __init__(self, players):
        # `players` is any object exposing get_players() -> list of Player.
        self.players = players.get_players()

    def top_scorers_by_nationality(self, national):
        """Players of the given nationality, highest point total first."""
        matching = [p for p in self.players if p.nationality == national]
        return sorted(matching, key=lambda p: p.points, reverse=True)
| alannesanni/palautusrepositorio | viikko2/nhl-reader/src/player.py | player.py | py | 1,119 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
}
] |
# -*- coding: utf-8 -*-
import oss2
from oss2.credentials import EnvironmentVariableCredentialsProvider
import os

# Fill in your OSS_ACCESS_KEY_ID and OSS_ACCESS_KEY_SECRET here.
os.environ['OSS_ACCESS_KEY_ID'] = ''
os.environ['OSS_ACCESS_KEY_SECRET'] = ''

auth = oss2.ProviderAuth(EnvironmentVariableCredentialsProvider())
# Change endpoint and bucket name to your own.
bucket = oss2.Bucket(auth, 'https://oss-cn-chengdu.aliyuncs.com', 'sourcedream-cloud')

# Mirror every object in the bucket to the local filesystem, recreating the
# bucket's folder structure.
for obj in oss2.ObjectIterator(bucket):
    # Ensure the local directory for this object's key exists.
    # NOTE(review): for a key with no '/', rfind returns -1 and `result`
    # becomes the key minus its last character - confirm keys always carry
    # a folder prefix.
    dirStr = str(obj.key)
    last_slash_index = dirStr.rfind('/')
    result = dirStr[:last_slash_index]
    if not os.path.exists(result):
        os.makedirs(result)
    # Download each object to a local file at the same relative path as its
    # key; existing files are overwritten (the directory must exist first).
bucket.get_object_to_file(obj.key, f'{obj.key}') | source-dream/AliyunOSS-DownloadTool | main.py | main.py | py | 1,190 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "oss2.ProviderAuth",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "oss2.credentials.Envi... |
73744223785 | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import logging
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VERSION = "1.0"
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dal',
'dal_select2',
'django.contrib.admin',
'reversion',
'ielex.lexicon',
'ielex.extensional_semantics',
'ielex.website',
'django_tables2',
'wtforms.ext.django',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware', # provides APPEND_SLASH
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'ielex.middleware.NoBlankLinesMiddleware', # does this cost much speed?
'reversion.middleware.RevisionMiddleware',
)
ROOT_URLCONF = 'ielex.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['ielex/templates'],
# 'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.request',
# required by django_tables2 for sorting
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'ielex.context_processors.configuration',
'ielex.context_processors.navigation',
],
'loaders': [('django.template.loaders.cached.Loader',
['django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader']
)],
'debug': False, # reset in local_settings.py
},
},
]
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'ielex/db.sqlite3'),
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Not used with sqlite3.
'PORT': '', # Not used with sqlite3.
'ATOMIC_REQUESTS': True, # Required by sqlite3 (only)
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Amsterdam' # TODO: # For Windows users:
# need to set to same as system time zone?
USE_I18N = False # Turning this off forces Django optimizations
# to avoid loading the internationalization machinery.
# USE_L10N = True
# USE_TZ = True
SITE_ID = 1
# admin urls
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/user/'
# --- setup logger ------------------------------------------------
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s] %(levelname)s %(message)s',
handlers=[logging.StreamHandler()])
# Default values (override in local_settings.py)
LIMIT_TO = 500
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/ielex/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static')
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
# Generate ielex/local_settings.py with a fresh SECRET_KEY on first run,
# then let it - and selected environment variables - override the defaults.
local_settings_path = os.path.join(BASE_DIR, "ielex/local_settings.py")
if not os.path.exists(local_settings_path):
    # create default local settings
    import random
    # NOTE(review): this reads the exact path just found to be missing, so
    # this branch raises FileNotFoundError as written - the template was
    # presumably meant to live elsewhere; confirm.
    settings_template = open(os.path.join(
        BASE_DIR, "ielex/local_settings.py")).read()
    key_chars = "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)"
    # 50-character random key substituted for the "<++>" placeholder.
    # NOTE(review): `random` is not a CSPRNG; `secrets` would be preferable.
    secret_key = "".join([random.choice(key_chars) for i in range(50)])
    print(settings_template.replace("<++>", secret_key),
          file=open(local_settings_path, "w"))

from ielex.local_settings import *

# Overwrite settings from env:
if os.getenv('DEBUG'):
    DEBUG = os.getenv('DEBUG') == 'True'
if os.getenv('SECRET_KEY'):
    SECRET_KEY = os.getenv('SECRET_KEY')
# Map DB_* environment variables onto the default database settings.
for k1, k2 in [('DB_HOST', 'HOST'),
               ('DB_PORT', 'PORT'),
               ('DB_NAME', 'NAME'),
               ('DB_USER', 'USER'),
               ('DB_PASSWORD', 'PASSWORD')]:
    if os.getenv(k1):
        DATABASES['default'][k2] = os.getenv(k1)
# Development extras: enable django-debug-toolbar when DEBUG is on and the
# package is importable; also disable the cached template loader so template
# edits show up immediately.
if DEBUG:
    try:
        import debug_toolbar
        MIDDLEWARE_CLASSES += \
            ('debug_toolbar.middleware.DebugToolbarMiddleware',)
        INTERNAL_IPS = ('127.0.0.1',)
        INSTALLED_APPS += ('debug_toolbar',)
        # DEBUG_TOOLBAR_CONFIG = {'INTERCEPT_REDIRECTS':False}
        # Disable cached Loader:
        loaders = TEMPLATES[0]['OPTIONS']['loaders']
        if loaders[0][0] == 'django.template.loaders.cached.Loader':
            TEMPLATES[0]['OPTIONS']['loaders'] = loaders[0][1]
            print('Disabled cached Loader.')
    except ImportError:
        # debug_toolbar not installed - run without it.
        pass
    TEMPLATES[0]["OPTIONS"]["debug"] = True
| lingdb/CoBL-public | ielex/settings.py | settings.py | py | 5,555 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_nu... |
38875732926 |
import torch
import torch.nn as nn
from torch.utils.cpp_extension import load
import os
import time
import random
import math

cur_path = os.path.dirname(os.path.realpath(__file__))

# JIT-compile the fused scatter+concat CUDA extension that lives next to
# this file; fall back to None (with a warning) on CPU-only machines.
if torch.cuda.is_available():
    scatter_concat_cuda = load('scatter_concat_cuda',
        [f'{cur_path}/scatter_concat.cu'],
        extra_cuda_cflags=['-O3', '--expt-relaxed-constexpr', '-std=c++17'],
        extra_ldflags=['-O3'],
        verbose=False)

    # load() registers the built extension as an importable module.
    import scatter_concat_cuda
else:
    scatter_concat_cuda = None
    print('CUDA not available, scatter_concat_cuda will not be available')
class FusedScatterConcat(torch.autograd.Function):
    # Thin autograd wrapper over the CUDA kernel. Forward-only: backward is
    # intentionally unimplemented.
    @staticmethod
    def forward(
        ctx,
        node_features : torch.Tensor,
        edge_features : torch.Tensor,
        srcs : torch.Tensor,
        dsts : torch.Tensor
    ):
        # Note the argument-order swap: the kernel takes edge features first.
        return scatter_concat_cuda.fused_scatter_concat(
            edge_features, node_features, srcs, dsts)

    @staticmethod
    def backward(ctx, grad): raise NotImplementedError()
def fused_scatter_concat(
    node_features : torch.Tensor,
    edge_features : torch.Tensor,
    srcs : torch.Tensor,
    dsts : torch.Tensor
):
    """Functional entry point for the fused scatter+concat autograd op."""
    args = (node_features, edge_features, srcs, dsts)
    return FusedScatterConcat.apply(*args)
def test_1():
    # Smoke test (requires a CUDA device): run the fused kernel on a tiny
    # 3-node / 3-edge example and print the result for manual inspection.
    dev = torch.device('cuda:0')

    nf = torch.Tensor([
        [1, 2],
        [3, 4],
        [5, 6],
    ]).half().to(dev)

    ef = torch.Tensor([
        [1, 1],
        [2, 2],
        [3, 3],
    ]).half().to(dev)

    # Edge endpoints: edge i goes srcs[i] -> dsts[i].
    srcs = torch.LongTensor([0, 1, 2]).to(dev)
    dsts = torch.LongTensor([2, 1, 0]).to(dev)

    out = fused_scatter_concat(nf, ef, srcs, dsts)
    print(out)
if __name__ == '__main__':
test_1()
| medav/meshgraphnets-torch | kernels/scatter_concat/kernel.py | kernel.py | py | 1,699 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available... |
43082790642 | import numpy as np
from matplotlib import pyplot as plt
def smooth(data, box_pts):
    """Box-filter `data` with a window of `box_pts` samples (same-length output)."""
    kernel = np.full(box_pts, 1.0 / box_pts)
    return np.convolve(data, kernel, mode='same')
filename = 'limited'
ignore_lines = True
x_min = np.inf
x_max = -np.inf
y_min = np.inf
y_max = -np.inf
z_min = np.inf
z_max = -np.inf
mins = list()
maxes = list()
max_list = list()
min_list = list()
x_acc = 0
y_acc = 0
z_acc = 0
q_acc = 0
line_counter = 0
samples_counter = 0
avg_positions = list()
avg_quality = list()
y_axis_pos = list()
diff_list = list()
with open(filename) as file:
lines = file.readlines()
for line in lines:
parse = line.split(sep=' ')
if parse[2] == "STOP":
ignore_lines = True
continue
if parse[2] == "RESUME":
ignore_lines = False
continue
if ignore_lines:
try:
x_avg = x_acc / float(line_counter)
y_avg = y_acc / float(line_counter)
z_avg = z_acc / float(line_counter)
q_avg = q_acc / float(line_counter)
# print("Averaged position: x:{:2.2f} y:{:2.2f} z:{:2.2f} avg.quality:{:2.2f}".format(x_avg, y_avg, z_avg, q_avg))
avg_positions.append((x_avg, y_avg, z_avg))
avg_quality.append(q_avg)
y_axis_pos.append(y_avg)
maxes.append((x_max, y_max, z_max))
mins.append((x_min, y_min, z_min))
except Exception as ex:
None
x_acc = 0
y_acc = 0
z_acc = 0
q_acc = 0
line_counter = 0
x_min = np.inf
x_max = -np.inf
y_min = np.inf
y_max = -np.inf
z_min = np.inf
z_max = -np.inf
continue
else:
if parse[3] == 'nan':
continue
samples_counter += 1
x = float(parse[3])
y = float(parse[4])
z = float(parse[5])
q = float(parse[7])
x_acc += x
y_acc += y
z_acc += z
q_acc += q
line_counter += 1
if x > x_max:
x_max = x
if x < x_min:
x_min = x
if y > y_max:
y_max = y
if y < y_min:
y_min = y
if z > z_max:
z_max = z
if z < z_min:
z_min = z
# Pad diff_list so it stays index-aligned with avg_positions (no motion
# before the first averaged sample).
diff_list.append((0, 0, 0))
for i in range(len(avg_positions)-1):
    pos1 = avg_positions[i]
    max_e = maxes[i]
    min_e = mins[i]
    pos2 = avg_positions[i+1]
    q1 = avg_quality[i]
    q2 = avg_quality[i+1]
    a_q = (q1 + q2) / 2.0
    # Step between consecutive averaged positions, plus the per-axis spread
    # of the raw extremes around the averaged position.
    diff = tuple(map(lambda a, b: a - b, pos2, pos1))
    max_err = tuple(map(lambda a, b: np.abs(a-b), max_e, pos1))
    min_err = tuple(map(lambda a, b: np.abs(a-b), min_e, pos1))
    diff_list.append(diff)
    max_list.append(max_err)
    min_list.append(min_err)
    print("Sample:{} difference:{} quality:{}".format(i, diff, a_q))

# Compare the mean per-step displacement with the known ground-truth step
# (metres) and report per-axis error in centimetres.
avg_diff = np.average(diff_list[1:], axis=0)
ground_truth = [0, 0.3, 0]
print("Average difference:", avg_diff)
print("Samples counter: ", samples_counter)
for l1, l2 in zip(ground_truth, avg_diff):
    print("Error: {:2.5f} cm".format(100*np.abs(l2-l1)))
plt.plot(max_list)
plt.show()
print(np.average(max_list, axis=0))
print(np.average(min_list, axis=0))
plt.plot(min_list)
plt.show()
plt.plot(maxes, linewidth=2)
plt.plot(mins, linewidth=2)
plt.axis([0, 36, -1, 11])
plt.axis([0, 65, -3, 17])
plt.legend(["Oś X - szerokość", "Oś Y - długość", "Oś Z - wysokość"], fontsize='xx-large')
plt.xlabel("Numer próbki", fontsize='xx-large')
plt.ylabel("Wartość próbki [m]", fontsize='xx-large')
plt.show()
plt.plot(y_axis_pos, avg_quality, color="black", linewidth=2)
plt.axis([-2.7, 16, 45, 95])
plt.xlabel("Oś Y [m]", fontsize='xx-large')
plt.ylabel("Jakość pozycji [%]")
plt.plot((0, 0), (45, 100), color='green', linewidth=3)
plt.plot((10.8, 10.8), (45, 100), color='green', linewidth=3)
plt.legend(["Jakość pozycji [%]", "Położenie anchorów"], fontsize='xx-large')
plt.show()
| dlech97/master-thesis | process_log.py | process_log.py | py | 3,749 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.ones",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.convolve",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number":... |
11841445530 | import torch
from torch import nn
from spdnet.spd import Normalize
class GBMS_RNN(nn.Module):
    """One Gaussian-blurring mean-shift step: moves each row of X toward the
    kernel-weighted average of all rows."""

    def __init__(self, bandwidth=0.1, normalize=True):
        super(GBMS_RNN, self).__init__()
        # Learnable Gaussian kernel bandwidth.
        self.bandwidth = nn.Parameter(torch.tensor(bandwidth))
        self.normalize = None
        if normalize:
            self.normalize = Normalize()

    def forward(self, X):
        bandwidth = self.bandwidth
        if self.normalize:
            # Affinity from inner products; assumes rows of X are
            # unit-normalised so that x.y - 1 = -||x - y||^2 / 2 -
            # TODO confirm Normalize() guarantees this.
            W = torch.exp((X @ X.transpose(-2, -1) - 1) / (bandwidth * bandwidth))
        else:
            # Explicit Gaussian affinity from pairwise Euclidean distances.
            pair_dis = torch.cdist(X, X)
            pair_dis_square = pair_dis**2
            W = torch.exp(-0.5 * pair_dis_square / (bandwidth * bandwidth))
        # Row-normalise affinities (D^-1 W) and take the weighted mean shift.
        D = W.sum(dim=-1).diag_embed()
        D_inv = D.inverse()
        X = (X.transpose(-2, -1) @ W @ D_inv).transpose(-2, -1)
        if self.normalize:
            # Re-project shifted rows back to the normalised representation.
            X = self.normalize(X)
        output = X
        return output
def cosine_similarity(input):
    """Pairwise row inner products of `input`, affinely mapped from [-1, 1] to [0, 1]."""
    gram = input @ input.transpose(-2, -1)
    return gram * 0.5 + 0.5
def similarity_loss(input, targets, alpha=0):
    """Contrastive loss: pull same-target rows toward similarity 1, push
    different-target rows below similarity `alpha` (hinge)."""
    similarity = cosine_similarity(input)
    # (N, N) mask: True where the two rows share a target label.
    same_target = targets.unsqueeze(-2) == targets.unsqueeze(-2).transpose(-2, -1)
    pull = (1 - similarity) * same_target
    push = torch.clamp(similarity - alpha, min=0) * (~same_target)
    return torch.mean(pull + push)
| Dandy5721/CPD-Net | MICCAI-2021/mean_shift/mean_shift.py | mean_shift.py | py | 1,419 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
17930873755 | #!/usr/bin/python
import os
import sys
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(myPath))
import pytest
import src.pre_commit as pre_commit
import subprocess
from mock import patch
# Each entry: (modified-file list passed to commit_is_ready, expected list
# of failed check names - empty means the commit is clean).
commit_is_ready_params = [
    (["test/foo.py"], []),
    (["test/clean.c"], []),
    ("", []),
]


@pytest.mark.parametrize("input, expected", commit_is_ready_params)
def test_commit_is_ready(input, expected):
    # With git, uncrustify and the checkability filter all mocked to succeed,
    # no check should report a failure.
    with patch('subprocess.check_output') as mock_check:
        mock_check.return_value = ""
        with patch('subprocess.call') as mock_call:
            mock_call.return_value = 0
            with patch('src.pre_commit.file_is_checkable') as mock_checkable:
                mock_checkable.return_value = True
                result = pre_commit.commit_is_ready(input)
                assert expected == result


# (path, exists?) pairs resolved against the repo's test fixtures.
file_exists_params = [
    ("test/clean.c", True),
    ("test/bar.c", False),
]


@pytest.mark.parametrize("input, expected", file_exists_params)
def test_file_exists(input, expected):
    result = pre_commit.file_exists(input)
    assert expected == result
# Only .c/.h files count as source files.
is_source_file_params = [
    ("foo.c", True),
    ("foo.h", True),
    ("foo.txt", False),
]


@pytest.mark.parametrize("input, expected", is_source_file_params)
def test_is_source_file(input, expected):
    result = pre_commit.is_source_file(input)
    assert expected == result


# Positive/negative path patterns for the "should this file be checked?" rule.
is_checked_file_pattern_params = [
    ("lib/some_lib/some_file.c", True),  # lib directory is checked
    ("lib/some_lib/aws_header.h", True),  # check aws headers
    ("lib/some_lib/task.h", False),  # Ignore kenrel headers
    ("demos/some_board/some_file.c", False),  # demo board directories are not checked
]


@pytest.mark.parametrize("input, expected", is_checked_file_pattern_params)
def test_is_checked_file_pattern(input, expected):
    result = pre_commit.is_checked_file_pattern(input)
    assert expected == result


# Sub-trees explicitly excluded from checking (vendored / kernel code).
is_ignored_file_pattern_params = [
    ("lib/third_party/some_lib/some_file.c", True),
    ("lib/FreeRTOS/some_file.c", True),
    ("lib/FreeRTOS-Plus-TCP/some_file.c", True),
    ("lib/include/some_file.h", False),
]


@pytest.mark.parametrize("input, expected", is_ignored_file_pattern_params)
def test_is_ignored_file_pattern(input, expected):
    result = pre_commit.is_ignored_file_pattern(input)
    assert expected == result


# Combined rule: source file AND checked pattern AND not ignored pattern.
file_is_checkable_params = [
    ("lib/some_lib/some_file.c", True),  # File is in directory that is checked
    ("lib/some_lib/some_file.py", False),  # File is not c source file
    ("foo/some_file.c", False),  # File is not in checked directory
    ("lib/third_party/some_file.c", False),  # File is in ignored sub-directory
]


@pytest.mark.parametrize("input, expected", file_is_checkable_params)
def test_file_is_checkable(input, expected):
    result = pre_commit.file_is_checkable(input)
    assert expected == result
def test_check_whitespace_calls_git():
    # check_whitespace delegates to `git diff-index --check` on success.
    with patch('subprocess.call') as mock:
        mock.return_value = 0
        assert [] == pre_commit.check_whitespace(["foo.c"])
        mock.assert_called_once_with(
            "git diff-index --check --cached HEAD", shell=True)


def test_check_whitespace_returns_false_on_error():
    with patch('subprocess.call') as mock:
        # Error code returned from git
        mock.return_value = 2
        assert ['whitespace'] == pre_commit.check_whitespace(["foo.c"])


# (uncrustify exit code, expected list of offending files). Any non-zero
# code marks the file as failing.
check_uncrustify_params = [
    (0, []),
    (1, ["foo.c"]),
    (-1, ["foo.c"]),
    (99, ["foo.c"]),
]


@pytest.mark.parametrize("return_code, expected", check_uncrustify_params)
def test_check_uncrustify(return_code, expected):
    with patch('subprocess.call') as mock:
        mock.return_value = return_code
        assert expected == pre_commit.check_uncrustify(["foo.c"])
        mock.assert_called_once_with(
            "uncrustify --check -q -c .uncrustify.cfg foo.c", shell=True)


# git output may be str or bytes; both decode to the same file list.
get_modified_files_params = [
    ("foo.c\nbar.h\nbaz.py", ["foo.c", "bar.h", "baz.py"]),
    (b"foo.c\nbar.h\nbaz.py", ["foo.c", "bar.h", "baz.py"]),
]


@pytest.mark.parametrize("git_result, expected", get_modified_files_params)
def test_get_modified_files(git_result, expected):
    with patch('subprocess.check_output') as mock:
        mock.return_value = git_result
        result = pre_commit.get_modified_files()
        assert expected == result


# Any non-zero exit code flags every passed file.
check_hungarian_notation_params = [
    (0, []),
    (1, ["foo.c", "bar.h"]),
    (-1, ["foo.c", "bar.h"]),
    (256, ["foo.c", "bar.h"]),
]


@pytest.mark.parametrize("return_code, expected", check_hungarian_notation_params)
def test_check_hungarian_notation(return_code, expected):
    with patch('subprocess.call') as mock:
        mock.return_value = return_code
        result = pre_commit.check_hungarian_notation(["foo.c", "bar.h"])
        assert expected == result


# An unformatted fixture appears in the patch header; a clean one does not.
patch_uncrustify_params = [
    ("test/unformatted.c", '"a/test/unformatted.c"'),
    ("test/clean.c", ''),
]


@pytest.mark.parametrize("input, expected", patch_uncrustify_params)
def test_patch_uncrustify(input, expected):
    result = pre_commit.patch_uncrustify(input)
    assert expected in result
| aws/amazon-freertos | tools/git/hooks/test/test_pre_commit.py | test_pre_commit.py | py | 5,105 | python | en | code | 2,543 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path.insert",
"line... |
import fiftyone as fo
import fiftyone.zoo as foz

# Download/load the "quickstart" demo dataset from the FiftyOne zoo.
dataset = foz.load_zoo_dataset("quickstart")

# Create a custom App config
app_config = fo.AppConfig()
app_config.show_confidence = True
app_config.show_attributes = True

# Launch the visualisation app on port 5151; session.wait() below blocks
# until it is closed.
session = fo.launch_app(dataset, config=app_config, port=5151)
session.wait() | patharanordev/ds51vis | sample.py | sample.py | py | 300 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fiftyone.zoo.load_zoo_dataset",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "fiftyone.zoo",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "fiftyone.AppConfig",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "fiftyone.l... |
10759107890 | import unittest
import sys
import time
sys.path.append("..")
from deltarest import DeltaRESTAdapter, DeltaRESTService
from pyspark.sql import SparkSession
class Test(unittest.TestCase):
    """End-to-end scenario test for DeltaRESTAdapter against a throwaway
    Delta Lake directory backed by a local Spark session."""

    # Unique scratch directory per run, so successive runs do not collide.
    root_dir: str = f"/tmp/delta_rest_test_{int(time.time())}"
    spark: SparkSession = None
    dra: DeltaRESTAdapter

    @classmethod
    def setUpClass(cls):
        # Local Spark with the delta-core package; keep nulls in JSON output
        # so REST responses contain explicit null fields.
        cls.spark = SparkSession \
            .builder \
            .appName("unit_tests") \
            .master("local") \
            .config("spark.jars.packages", "io.delta:delta-core_2.12:0.8.0") \
            .config("spark.sql.jsonGenerator.ignoreNullFields", "false") \
            .getOrCreate()
        cls.dra = DeltaRESTAdapter(cls.root_dir, 10)

    @classmethod
    def tearDownClass(cls):
        cls.spark.stop()
        # os.rmdir(cls.root_dir)

    def test_scenario(self):
        # PUT: create tables; a repeated PUT reports the table already exists.
        self.assertEqual(
            '{"message":"Table foo created"}',
            bytes.decode(self.dra.put("/tables/foo").response[0], "utf-8")
        )
        self.assertEqual(
            '{"message":"Table foo already exists"}',
            bytes.decode(self.dra.put("/tables/foo").response[0], "utf-8")
        )
        self.assertEqual(
            '{"message":"Table bar created"}',
            bytes.decode(self.dra.put("/tables/bar").response[0], "utf-8")
        )

        # POST: insert rows into an existing table.
        self.assertEqual(
            '{"message":"Rows created"}',
            bytes.decode(self.dra.post(
                "/tables/foo",
                {"rows": [
                    {"id": 1},
                    {"id": 2}
                ]}
            ).response[0], "utf-8")
        )

        # schema merging: rows with a new column extend the table schema.
        self.assertEqual(
            '{"message":"Rows created"}',
            bytes.decode(self.dra.post(
                "/tables/foo",
                {"rows": [
                    {"collection": [1, 2]},
                    {"collection": [3, 4]}
                ]}
            ).response[0], "utf-8")
        )

        # post rows in a table not created yet
        self.assertEqual(
            '{"message":"Table foo_ not found"}',
            bytes.decode(self.dra.post(
                "/tables/foo_",
                {"rows": [
                    {"id": 1, "collection": [1, 2]},
                    {"id": 2, "collection": [3, 4]}
                ]}
            ).response[0], "utf-8")
        )

        # GET
        # get full table: merged schema yields explicit nulls per row.
        self.assertEqual(
            '{"rows":[{"id":null,"collection":[1,2]},' +
            '{"id":null,"collection":[3,4]},' +
            '{"id":1,"collection":null},{"id":2,"collection":null}]}',
            bytes.decode(
                self.dra.get("/tables/foo").response[0],
                "utf-8"
            )
        )
        # get with query on a table
        self.assertEqual(
            '{"rows":[{"count":2,"collections_concat_size":4}]}',
            bytes.decode(
                self.dra.get(
                    """/tables/foo?sql=SELECT
count(id) as count,
sum(size(ifnull(collection, array()))) as collections_concat_size
FROM foo"""
                ).response[0],
                "utf-8"
            )
        )
        # get with query on tables
        self.assertEqual(
            '{"rows":[{"count":16}]}',
            bytes.decode(
                self.dra.get(
                    """/tables?sql=SELECT
count(*) as count
FROM foo as t1 CROSS JOIN foo as t2
LIMIT 100"""
                ).response[0],
                "utf-8"
            )
        )
        # get tables names listing
        self.assertEqual(
            '{"tables":["bar","foo"]}',
            bytes.decode(
                self.dra.get("/tables").response[0],
                "utf-8"
            )
        )

    # def test_service(self):
    #     DeltaRESTService(self.root_dir).run("0.0.0.0", "4444")
| bonnal-enzo/delta-rest | test/test.py | test.py | py | 3,975 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"l... |
31071015439 | from core_algo.SGA import SGA
import alarm
import sys
import json
import time
from tabulate import tabulate
def average_res(ga, cal_times=100, **params):
    """Run ``ga.solve`` *cal_times* times and average the outcomes.

    Returns a 4-tuple of means: (final cost, runtime, last generation,
    per-generation convergence speed).
    """
    # Accumulators: cost, runtime, generation, convergence speed.
    totals = [0, 0, 0, 0]
    for _ in range(cal_times):
        # solve the question once
        res, runtime, last_gen = ga.solve(**params)
        # res[0]/res[-1] are the first/last generations; [1][0] is the cost.
        totals[0] += res[-1][1][0]
        totals[1] += runtime
        totals[2] += last_gen
        totals[3] += (res[0][1][0] - res[-1][1][0]) / last_gen
    return tuple(t / cal_times for t in totals)
def print_table(base, compare, header, filename=""):
    """Print a tabulate table of the base row plus comparison rows.

    When *filename* is non-empty, the rendered table is also written there.
    """
    rows = [["base", *base]]
    rows.extend([label, *values] for label, values in compare.items())
    table = tabulate(rows, headers=["", *header])
    print(table)
    if filename:
        with open(filename, "w") as f:
            f.write(table)
def param_compare():
    """Benchmark GA parameter variations (recovery / PFIH rates) against a base.

    Interactively prompts for the test-data name, output name, the
    selection and crossover methods and the averaging count, then runs
    every parameter set through ``average_res`` and prints/appends one
    tabulate table per iteration mode to a timestamped ``.tbl`` file.
    """
    # get test data filename
    data = {}
    filename = input("test data file: ")
    # FIX: interpolate the requested data-set name into the path (the
    # previous literal never used the `filename` the user typed in).
    with open(f"input_with_location/test_{filename}.in.json") as f:
        data = json.load(f)
    # get output filename
    output_filename = input("output filename: ")
    # init output file (timestamp keeps successive runs separate)
    print_file = f"param_tbl/{output_filename}_{filename}_{round(time.time())}.tbl"
    # get params by input
    select_method = input("choose a select method(*rws, tourn): ") or "rws"
    select_args = []
    if select_method == "tourn":
        select_args = [4, 0.7]  # tournament size and selection pressure
    xo_method = input("choose a crossover method(*pmx, cbx): ") or "pmx"
    # get average times
    average_times = int(input("average times(*100): ") or 100)
    # init base params while create new GA instance
    base_inst_params = {
        "recovery_rate": 0,
        "pop_num": 50,
        "pfih_rate": 0,
        "data": data,
        "select_method": select_method,
        "select_args": select_args,
        "xo_method": xo_method,
    }
    # init solve params
    solve_params = {
        # "convergence": {
        #     "max_gen": 500,
        #     "min_gen": 100,
        #     "observe_gen": 100,
        #     "mode": "dev",
        # },
        "time-2500": {"max_gen": 1800, "limit_time": 2.5, "mode": "dev"},
    }
    # init compare params (each entry overrides the base instance params)
    compare_params_dict = {
        "00r-50p-00pf-100el": {},
        "04-recovery": {"recovery_rate": 0.04},
        "08-recovery": {"recovery_rate": 0.08},
        "04-pfih": {"pfih_rate": 0.04},
        "08-pfih": {"pfih_rate": 0.08},
        "12-pfih": {"pfih_rate": 0.12},
    }
    for iteration_mode, param in solve_params.items():
        compare_res = []
        base_res = []
        for k, v in compare_params_dict.items():
            inst_params = {**base_inst_params, **v}
            ga = SGA(**inst_params)
            cost, runtime, gen, _ = average_res(ga, average_times, **param)
            if k == "00r-50p-00pf-100el":
                # first entry is the baseline: keep raw values
                base_res = [cost, runtime]
                compare_res.append([k, cost, runtime, gen])
            else:
                # later entries are reported relative to the baseline (%)
                base_cost, base_runtime = base_res
                compare_res.append(
                    [
                        k,
                        round((cost - base_cost) * 100 / base_cost, 2),
                        round((runtime - base_runtime) * 100 / base_runtime, 2),
                        gen,
                    ]
                )
        headers = ["", "cost(%)", "runtime(%)", "generation"]
        tbl = tabulate(compare_res, headers=headers)
        print(tbl)
        with open(print_file, "a") as f:
            f.write(f"{iteration_mode}\n")
            f.write(f"{tbl}\n\n")
def method_compare():
    """Benchmark selection/crossover method combinations against ``obx-rws``.

    Same interactive flow as ``param_compare`` but sweeps method
    combinations, reporting absolute values plus deltas (%) against the
    base combination, and the per-generation convergence speed.
    """
    # get test data filename
    data = {}
    filename = input("test data file: ")
    # FIX: interpolate the requested data-set name into the path (the
    # previous literal never used the `filename` the user typed in).
    with open(f"input_with_location/test_{filename}.in.json") as f:
        data = json.load(f)
    # get output filename
    output_filename = input("output filename: ")
    # init output file (timestamp keeps successive runs separate)
    print_file = f"method_tbl/{output_filename}_{filename}_{round(time.time())}.tbl"
    # get average times
    average_times = int(input("average times(*100): ") or 100)
    # init base params while create new GA instance
    base_inst_params = {
        "recovery_rate": 0,
        "pop_num": 50,
        "pfih_rate": 0,
        "data": data,
        "select_method": "rws",
        "select_args": [],
        "xo_method": "pmx",
    }
    # init solve params
    solve_params = {
        "convergence": {
            "max_gen": 500,
            "min_gen": 100,
            "observe_gen": 100,
            "mode": "dev",
        },
        "time-2500": {"max_gen": 1800, "limit_time": 2.5, "mode": "dev"},
    }
    # init compare params (each entry overrides the base instance params)
    compare_params_dict = {
        "obx-rws": {},
        "obx-tourn": {"select_method": "tourn", "select_args": [4, 0.7]},
        "cbx-rws": {"xo_method": "cbx"},
        "cbx-tourn": {
            "select_method": "tourn",
            "select_args": [4, 0.7],
            "xo_method": "cbx",
        },
        "cbx-rws-rec03": {"xo_method": "cbx", "recovery_rate": 0.03},
        "cbx-tourn-rec03": {
            "select_method": "tourn",
            "select_args": [4, 0.7],
            "xo_method": "cbx",
            "recovery_rate": 0.03,
        },
    }
    for iteration_mode, param in solve_params.items():
        compare_res = []
        base_res = []
        for k, v in compare_params_dict.items():
            inst_params = {**base_inst_params, **v}
            ga = SGA(**inst_params)
            cost, runtime, gen, speed = average_res(ga, average_times, **param)
            if k == "obx-rws":
                # first entry is the baseline: deltas are 0 by definition
                base_res = [cost, runtime]
                compare_res.append([k, cost, 0, runtime, 0, gen, speed])
            else:
                base_cost, base_runtime = base_res
                compare_res.append(
                    [
                        k,
                        cost,
                        round((cost - base_cost) * 100 / base_cost, 2),
                        runtime,
                        round((runtime - base_runtime) * 100 / base_runtime, 2),
                        gen,
                        speed,
                    ]
                )
        headers = [
            "",
            "cost",
            "cost(%)",
            "runtime",
            "runtime(%)",
            "generation",
            "con speed",
        ]
        tbl = tabulate(compare_res, headers=headers)
        print(tbl)
        with open(print_file, "a") as f:
            f.write(f"{iteration_mode}\n")
            f.write(f"{tbl}\n\n")
def main():
    """Dispatch the requested comparison run(s) from the command line.

    ``param`` runs only the parameter sweep, ``method`` only the method
    sweep; anything else (including no argument) runs both.
    """
    mode = sys.argv[1] if len(sys.argv) > 1 else None
    if mode == "param":
        param_compare()
    elif mode == "method":
        method_compare()
    else:
        param_compare()
        method_compare()
    # notify program is finished
    alarm.main()


if __name__ == "__main__":
    main()
| UTP-project/core-algo | exp_compare.py | exp_compare.py | py | 7,044 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tabulate.tabulate",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "core_algo.SGA.SGA",
"line_n... |
8272041392 | """
Test the OOD DiscDist2 scorer. It disentangle the feature into discriminative space and the residual space
"""
import pdb
import numpy as np
import matplotlib.pyplot as plt
import os
import argparse
import torch
from tqdm import tqdm
import torch.backends.cudnn as cudnn
from ood_scores.get_scorers import get_scorer
from utils.argparser import OODArgs
from dataset.dataloaders import get_loaders_for_ood
from models.get_models import get_model
from utils.metrics import get_measures
from utils.utils import print_measures, load_features, get_feat_dims
# Seed NumPy so any sampling below is reproducible across runs.
np.random.seed(10)

# ==================== Prepare
# args
argparser = OODArgs()
args = argparser.get_args()
print(args)

# scorer: OOD scoring object selected by name (disentangles features into
# a discriminative space and a residual space -- see module docstring)
scorer = get_scorer(args.score, args)

# dataloaders
id_train_loader, id_test_loader, ood_loaders = get_loaders_for_ood(args)

# feature dims -- the network is only needed to probe feature dimensions;
# features themselves are loaded from disk via load_features below.
net = get_model(arch=args.arch, args=args, load_file=args.load_file, strict=True)
net.eval()

if args.ngpu > 1:
    net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
elif args.ngpu > 0:
    net.cuda()
# torch.cuda.manual_seed(1)
device = "cuda" if torch.cuda.is_available() else "cpu"
cudnn.benchmark = True  # fire on all cylinders

# Contrastive / CLIP backbones expose an embedding head rather than logits.
embed_mode = "supcon" in args.arch or "clip" in args.arch
proj_dim, featdims = get_feat_dims(args, net, embed_mode=embed_mode)
last_start_idx = featdims[:-1].sum() #<- For getting the last feature only

# ==================== Load id_train features and prepare the scorer
# Load id-train feature and append
id_train_feat, id_train_logit, id_train_label = load_features(args, id=True, split="train", last_idx=last_start_idx, \
    featdim=featdims[-1], projdim=proj_dim, feat_space=args.feat_space)
scorer.append_features(id_train_feat, id_train_label)
print(f"The ID data number: {scorer.N}; Feature dim: {scorer.D}; Class number: {scorer.num_class}")
scorer.fit()
print("Scorer fitting done.")

# ==================== Load id test features and cal ood score
print("Calculating the in distribution OOD scores...")
id_test_feat, id_test_logit, id_test_label = load_features(args, id=True, split="test", last_idx=last_start_idx, \
    featdim=featdims[-1], projdim=proj_dim, feat_space=args.feat_space)
if args.run_together:
    # Batched path: one call over the whole test matrix.
    id_scores = scorer.cal_score(id_test_feat, return_all=True)
else:
    # Per-sample path: cal_score returns (combined, disc-space,
    # residual-space) score triples.
    N = id_test_feat.shape[0]
    id_scores = []
    id_scores_disc = []
    id_scores_residual = []
    for i in tqdm(range(N)):
        id_score_this, id_score_disc_this, id_score_residual_this = scorer.cal_score(id_test_feat[i, :], return_all=True)
        id_scores.append(id_score_this)
        id_scores_disc.append(id_score_disc_this)
        id_scores_residual.append(id_score_residual_this)
    id_scores = np.array(id_scores)
    id_scores_disc = np.array(id_scores_disc)
    id_scores_residual = np.array(id_scores_residual)

# ==================== Load ood test features, cal ood score, and evaluate ood_performance
aurocs, fprs = [], []
aurocs_disc, fprs_disc = [], []
aurocs_residual, fprs_residual = [], []
ood_names = [ood_loader.dataset.name for ood_loader in ood_loaders]
for ood_name in ood_names:
    print(f"\n\n{ood_name} OOD Detection")

    # load feature
    ood_feat, ood_logit = load_features(args, id=False, ood_name=ood_name, last_idx=last_start_idx, \
        featdim=featdims[-1], projdim=proj_dim, feat_space=args.feat_space)

    # calculate ood_scores (same batched/per-sample split as for ID data)
    if args.run_together:
        ood_scores = scorer.cal_score(ood_feat, return_all=True)
    else:
        N = ood_feat.shape[0]
        ood_scores = []
        ood_scores_disc = []
        ood_scores_residual = []
        for i in tqdm(range(N)):
            ood_score_this, ood_score_disc_this, ood_score_residual_this = scorer.cal_score(ood_feat[i, :], return_all=True)
            ood_scores.append(ood_score_this)
            ood_scores_disc.append(ood_score_disc_this)
            ood_scores_residual.append(ood_score_residual_this)
        ood_scores = np.array(ood_scores)
        ood_scores_disc = np.array(ood_scores_disc)
        ood_scores_residual = np.array(ood_scores_residual)

    # evaluate - Use all the OOD samples; one AUROC/FPR pair per score space
    for id_s, ood_s, name, aurocs_bin, fprs_bin in zip(
        [id_scores_disc, id_scores_residual, id_scores],
        [ood_scores_disc, ood_scores_residual, ood_scores],
        ["DiscSpace", "ResidualSpace", "Together"],
        [aurocs_disc, aurocs_residual, aurocs],
        [fprs_disc, fprs_residual, fprs],
    ):
        auroc_this, _, fpr_this = get_measures(id_s, ood_s)
        aurocs_bin.append(auroc_this)
        fprs_bin.append(fpr_this)
        # print
        print_measures(auroc=auroc_this, fpr=fpr_this, method_name=name)

# the mean performance across all OOD datasets
print("\n\n")
print("Mean results:")
for name, aurocs_bin, fprs_bin in zip(
    ["DiscSpace", "ResidualSpace", "Together"],
    [aurocs_disc, aurocs_residual, aurocs],
    [fprs_disc, fprs_residual, fprs],
):
    auroc = np.array(aurocs_bin).mean()
    fpr = np.array(fprs_bin).mean()
    print_measures(auroc=auroc, fpr=fpr, method_name=name)
| ivalab/WDiscOOD | test_feat_disc.py | test_feat_disc.py | py | 5,166 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "numpy.random.seed",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "utils.argparser.OODArgs",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "ood_score... |
25969718175 | # -*- coding: utf-8 -*-
from django.http import HttpResponse
from django.template import RequestContext, Template
from django.views.decorators.csrf import csrf_exempt
from django.utils.encoding import smart_str, smart_unicode
import xml.etree.ElementTree as ET
import urllib, urllib2, time, hashlib
@csrf_exempt
def handle_request(request):
    """Route WeChat platform traffic: GET = signature handshake, POST = message reply."""
    method = request.method
    if method == 'GET':
        return HttpResponse(check_signature(request), content_type="text/plain")
    if method == 'POST':
        return HttpResponse(response_msg(request), content_type="application/xml")
    # Any other verb gets a plain placeholder response.
    return HttpResponse("Hello world ")
def check_signature(request):
    """Verify the WeChat server signature on a GET handshake.

    Returns the ``echostr`` challenge string when the signature matches,
    otherwise an empty string. The caller wraps this return value in an
    HttpResponse, so a plain string must be returned -- the previous
    implementation returned an ``HttpResponse`` object on mismatch, which
    then became the *content* of another response (a bug).
    """
    token = "wangqihui0324"
    signature = request.GET.get("signature")
    timestamp = request.GET.get("timestamp")
    nonce = request.GET.get("nonce")
    echostr = request.GET.get("echostr")
    # WeChat signing scheme: sort the three values lexicographically,
    # concatenate them, then SHA-1 the result.
    signature_tmp = [token, timestamp, nonce]
    signature_tmp.sort()
    joined = "".join(signature_tmp)
    # encode() makes this work on Python 3 as well (sha1 requires bytes).
    digest = hashlib.sha1(joined.encode("utf-8")).hexdigest()
    if digest == signature:
        return echostr
    return ""
def response_msg(request):
    """Parse an incoming WeChat XML message and build a text reply.

    Swaps sender/receiver, stamps the current time and replies with a
    fixed text content.
    """
    # Flatten every child element of the posted XML into a dict keyed by tag.
    msg_xml = ET.fromstring(request.body)
    fields = {element.tag: smart_str(element.text) for element in msg_xml}
    content = "1"
    template = "<xml><ToUserName><![CDATA[%s]]></ToUserName><FromUserName><![CDATA[%s]]></FromUserName><CreateTime>%d</CreateTime><MsgType><![CDATA[%s]]></MsgType><Content><![CDATA[%s]]></Content></xml>"
    # Reply goes back to the original sender, from the original receiver.
    return template % (fields['FromUserName'], fields['ToUserName'], int(time.time()), 'text', content)
| wqh872081365/weixin0324 | weixin0324/views1.py | views1.py | py | 2,306 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.http.HttpResponse",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 21,
"usage_type": "call"
},
{
"api_na... |
22355151722 | # coding: utf-8
import tkinter as tk
from tkinter import messagebox, filedialog
import os
from PIL import Image, ImageTk
from detector import PlateDetector
from util import resized_size
class LPRGUI:
    """Tkinter front-end for licence-plate recognition.

    Shows a canvas for the loaded image, a label with the recognised
    plate number and Load/Detect/Clear buttons wired to a PlateDetector.
    """

    # Canvas bounds: loaded images are scaled to fit inside this box.
    max_image_width = 600
    max_image_height = 600

    def __init__(self):
        # Detector backed by two CNN checkpoints (Chinese chars / others).
        self.detector = PlateDetector(chinese_cnn_path='./models/chinese/79.pth',
                                      others_cnn_path='./models/others/49.pth')
        root = tk.Tk()
        root.title('Licence Plate Recognition')
        root.geometry('800x800')
        root.resizable(0, 0)
        # Text shown next to "Plate Number:" (starts as the literal 'None').
        self.plate_number_var = tk.StringVar(root, value='None')
        self.add_gap(root, 20)
        # Show image
        self.image_canvas = tk.Canvas(root, width=self.max_image_width, height=self.max_image_height, bg='#E0E0E0')
        self.image_canvas.pack()
        # Keep a reference to the PhotoImage so Tk does not garbage-collect it.
        self.tk_image = None
        self.add_gap(root, 20)
        # Show detected plate number
        frame = tk.Frame(root)
        frame.pack()
        tk.Label(frame, text='Plate Number:', font=('Times New Roman', 20)).grid(row=0, column=0)
        tk.Label(frame, textvariable=self.plate_number_var, font=('Times New Roman', 20)).grid(row=0, column=1)
        # Load image button
        self.load_button = tk.Button(root, text="Load Image", width=16, command=self.on_load_image)
        self.load_button.pack(expand=True)
        # Detect button (disabled until an image has been loaded)
        self.detect_button = tk.Button(root, text="Detect", width=16, command=self.on_detect)
        self.detect_button.pack(expand=True)
        self.detect_button['state'] = 'disabled'
        # Clear button
        self.clear_button = tk.Button(root, text="Clear", width=16, command=self.on_clear)
        self.clear_button.pack(expand=True)
        print("-------------init success-------------")
        root.mainloop()

    def on_load_image(self):
        """Ask the user for an image file, preview it and arm detection."""
        file_path = filedialog.askopenfilename(title='Load Image',
                                               filetypes=[('Image Files', '*.jfif *.jpg *.png *.gif'),
                                                          ('All Files', '*')],
                                               initialdir=os.getcwd())
        assert os.path.exists(file_path)
        image = Image.open(file_path)
        self.draw_canvas(image=image)
        self.detector.load_img(file_path)
        self.detect_button['state'] = 'active'
        self.plate_number_var.set('None')

    def on_detect(self):
        """Run the pipeline: locate plate, split characters, classify."""
        try:
            self.detector.find_plate_location()
            # detector holds a BGR array; [..., ::-1] flips it to RGB for PIL.
            self.draw_canvas(Image.fromarray(self.detector.img_after_detected[..., ::-1]))
            self.detector.split_characters()
            self.detector.classify_characters()
            self.plate_number_var.set(''.join(self.detector.result_list))
        except Exception:
            # Any stage may fail on a bad image; report a single warning.
            messagebox.showwarning('Error', 'Detection failed!')

    def on_clear(self):
        """Reset the canvas, detector state and displayed plate number."""
        self.tk_image = None
        self.image_canvas.delete('all')
        self.detector.clear_img()
        self.detect_button['state'] = 'disabled'
        self.plate_number_var.set('None')

    def draw_canvas(self, image):
        """Scale *image* to fit the canvas and draw it centred on one axis."""
        self.image_canvas.delete('all')
        image = image.resize(resized_size(image.size, (self.max_image_width, self.max_image_height), mode='scale'))
        self.tk_image = ImageTk.PhotoImage(image)
        # Centre along whichever axis has slack after scaling.
        horizontal_padding, vertical_padding = 0, 0
        if image.width < self.max_image_width:
            horizontal_padding = (self.max_image_width - image.width) // 2
        else:
            vertical_padding = (self.max_image_height - image.height) // 2
        image_item = self.image_canvas.create_image(horizontal_padding, vertical_padding, anchor='nw')
        self.image_canvas.itemconfig(image_item, image=self.tk_image)

    @classmethod
    def add_gap(cls, root, height):
        """Insert an empty frame *height* pixels tall as vertical spacing."""
        tk.Frame(root, height=height).pack()


if __name__ == '__main__':
    LPRGUI()
| QQQQQby/Car-Plate-Recognition | start_gui.py | start_gui.py | py | 3,908 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "detector.PlateDetector",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tkinter.StringVar",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tkinter.Canvas",
... |
506681210 | """
Resample Raster Files
"""
def match_cellsize_and_clip(rstBands, refRaster, outFolder,
                            clipgeo=None, isint=None, ws=None):
    """
    Resample images to make them with the same resolution and clip

    Good to resample Sentinel bands with more than 10 meters.

    Each band in rstBands is resampled to refRaster's resolution inside a
    temporary GRASS GIS location; clipgeo (vector or raster) optionally
    restricts region and mask. Returns the list of exported GeoTIFF paths.

    Dependencies:
    * GRASS GIS;
    * GDAL/OGR.
    """

    import os
    from glass.prop.df import is_rst
    from glass.prop.prj import rst_epsg
    from glass.wenv.grs import run_grass
    from glass.pys.oss import fprop, mkdir
    from glass.pys.tm import now_as_str

    # Check if outfolder exists
    if not os.path.exists(outFolder):
        mkdir(outFolder, overwrite=None)

    # Get EPSG from refRaster (drives the GRASS location's SRS)
    epsg = rst_epsg(refRaster, returnIsProj=None)

    """
    Start GRASS GIS Session
    """
    # Workspace defaults to a timestamped folder inside outFolder.
    ws = mkdir(os.path.join(outFolder, now_as_str())) \
        if not ws else ws

    grsb = run_grass(
        ws, grassBIN='grass78', location='resample',
        srs=epsg
    )

    import grass.script.setup as gsetup

    gsetup.init(grsb, ws, 'resample', 'PERMANENT')

    """
    Import packages related with GRASS GIS
    """
    # These imports only work after gsetup.init, hence their placement here.
    from glass.it.rst import rst_to_grs, grs_to_rst, grs_to_mask
    from glass.wenv.grs import rst_to_region
    from glass.it.shp import shp_to_grs
    from glass.dtt.torst import grsshp_to_grsrst as shp_to_rst

    # Send Ref Raster to GRASS GIS and set region
    extRst = rst_to_grs(refRaster, 'ext_rst')
    rst_to_region(extRst)

    # Import all bands in rstBands
    grs_bands = [rst_to_grs(i, fprop(i, 'fn')) for i in rstBands]

    if clipgeo:
        clip_is_rst = is_rst(clipgeo)
        # Add clipgeo to GRASS
        if not clip_is_rst:
            # Vector clip: FileGDB paths carry the layer as the basename.
            if '.gdb' in clipgeo:
                lyr = os.path.basename(clipgeo)
                clipgeo = os.path.dirname(clipgeo)
                # Walk up until the path ends at the .gdb container itself.
                if clipgeo[-4:] != '.gdb':
                    clipgeo = os.path.dirname(clipgeo)
            else:
                lyr = None
            grs_clip = shp_to_grs(clipgeo, asCMD=True, lyrname=lyr)
            # SHP to Raster
            rst_clip = shp_to_rst(
                grs_clip, 1, f'rst_{grs_clip}',
                cmd=True
            )
        else:
            rst_clip = rst_to_grs(clipgeo, fprop(clipgeo, 'fn'))
        # Set region using the clip raster
        rst_to_region(rst_clip)
        # Set mask so exports are restricted to the clip area
        grs_to_mask(rst_clip)

    # Export bands
    return [grs_to_rst(
        i, os.path.join(outFolder, i + '.tif'),
        rtype=int if isint else float
    ) for i in grs_bands]
def resample_by_majority(refrst, valrst, out_rst):
    """
    Resample valrst based on refrst:
    Get Majority value of valrst for each cell in refrst

    Useful when ref raster has cellsize greater than value raster.
    Both inputs may be file paths or already-open gdal.Dataset objects.
    Assumes the rasters are aligned and the cell-size ratio is an
    integer -- TODO confirm with callers.

    TODO: Valrst must be of int type (np.bincount requires non-negative
    integers).
    """

    import numpy as np
    from osgeo import gdal
    from glass.prop.img import get_cell_size, get_nd, rst_epsg
    from glass.wt.rst import obj_to_rst

    # Data to Array. isinstance also accepts gdal.Dataset subclasses,
    # unlike the previous ``type(x) == gdal.Dataset`` comparison.
    refsrc = refrst if isinstance(refrst, gdal.Dataset) else gdal.Open(refrst)
    valsrc = valrst if isinstance(valrst, gdal.Dataset) else gdal.Open(valrst)

    refnum = refsrc.ReadAsArray()
    valnum = valsrc.ReadAsArray()

    # Get Ref shape
    ref_shape = refnum.shape

    # in a row, how many cells valnum are for each refnum cell
    refcs = int(get_cell_size(refsrc)[0])
    valcs = int(get_cell_size(valsrc)[0])
    dcell = int(refcs / valcs)

    # Create generalized/resampled raster: each output cell takes the
    # most frequent value of the dcell x dcell window it covers.
    resnum = np.zeros(ref_shape, dtype=valnum.dtype)
    for row in range(ref_shape[0]):
        for col in range(ref_shape[1]):
            window = valnum[
                row * dcell:row * dcell + dcell,
                col * dcell:col * dcell + dcell
            ].reshape(dcell * dcell)
            resnum[row, col] = np.bincount(window).argmax()

    # Export out raster
    return obj_to_rst(
        resnum, out_rst,
        refsrc.GetGeoTransform(), rst_epsg(refsrc),
        noData=get_nd(valsrc)
    )
| jasp382/glass | glass/rst/rmp.py | rmp.py | py | 4,165 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "glass.pys.oss.mkdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "glass.prop.prj.rst_ep... |
24985603198 | import pygame
from settings import BOSSTELEPORTINGSOUND, ENEMYHITSOUND, importFolder, bossPositions
from random import randint
from os import path
class Boss(pygame.sprite.Sprite):
    """Teleporting boss enemy.

    Warps between fixed arena positions on a cooldown and periodically
    summons four homing projectiles (BossSummon) aimed at the player.
    """

    def __init__(self, pos, surface, level):
        super().__init__()
        # animation
        self.displaySurface = surface
        self.frameIndex = 0
        self.animationSpeed = 0.15
        self.status = 'idle'
        self.importCharacterAssets()
        self.image = self.animations['idle'][self.frameIndex]
        self.rect = self.image.get_rect(center = pos)
        self.hitbox = pygame.Rect((0,0), (40, 50))
        # One-second grace period after spawn before the boss acts.
        self.canTakeAction = False
        self.spawnTime = pygame.time.get_ticks()
        self.level = level
        self.defeated = False
        # stats
        self.maxHp = 50
        self.hp = self.maxHp
        self.specialDamage = 3
        # status-gameplay
        self.facingRight = False
        self.alive = True
        # Invulnerability frames after taking a hit (milliseconds).
        self.hasIFrames = False
        self.iFramesCD = 500
        self.hitTime = 0
        self.hit = False
        self.location = 2
        # bossmagic: summon attack timers (all times in milliseconds)
        self.specialAttackHitboxes = pygame.sprite.Group()
        self.canSpecialAttack = True
        self.specialAttack = False
        self.specialAttackDuration = 600
        self.specialAttackCD = 5000
        self.specialAttackTime = 0
        # bossTeleport: teleport timers (milliseconds)
        self.teleporting = False
        self.canTeleport = False
        self.teleportTime = 0
        self.teleportDuration = 500
        self.teleportCD = 5000

    def importCharacterAssets(self):
        """Load each animation's frame list from Assets/enemies/boss/<state>."""
        characterPath = path.join('Assets','enemies','boss')
        # NOTE(review): 'idle' appears twice in this literal; the duplicate
        # key is harmless but redundant.
        self.animations = {'idle':[],'attack':[],'death':[],'idle':[],'skill':[],'summon':[]}
        for animation in self.animations.keys():
            fullPath = path.join(characterPath,animation)
            self.animations[animation] = importFolder(fullPath)

    def animate(self):
        """Advance the current animation; mark defeat after the death loop."""
        animation = self.animations[self.status]
        # loop over frame index
        self.frameIndex += self.animationSpeed
        if self.frameIndex >= len(animation):
            # The death animation plays once; finishing it ends the fight.
            if not self.alive:
                self.defeated = True
                return
            self.frameIndex = 0
        image = animation[int(self.frameIndex)]
        if self.facingRight:
            self.image = image
        else:
            flippedImage = pygame.transform.flip(image, True, False)
            self.image = flippedImage
        # set the rect
        self.hitbox.center = self.rect.center

    def cooldowns(self):
        """Tick every timed state: summon, teleport, i-frames, action delay."""
        self.hit = False
        currentTime = pygame.time.get_ticks()
        # Summon attack: active for its duration, then on cooldown.
        if self.specialAttack:
            self.canSpecialAttack = False
            if currentTime - self.specialAttackTime >= self.specialAttackDuration:
                self.specialAttack = False
        if not self.canSpecialAttack:
            if currentTime - self.specialAttackTime >= self.specialAttackCD:
                self.canSpecialAttack = True
        # Teleport: same duration/cooldown pattern.
        if self.teleporting:
            self.canTeleport = False
            if currentTime - self.teleportTime >= self.teleportDuration:
                self.teleporting = False
        if not self.canTeleport:
            if currentTime - self.teleportTime >= self.teleportCD:
                self.canTeleport = True
        # Invulnerability frames after being hit.
        if self.hasIFrames:
            self.hit = False  # NOTE(review): redundant; already cleared above
            if currentTime - self.hitTime >= self.iFramesCD:
                self.hasIFrames = False
        # Initial one-second grace period after spawning.
        if not self.canTakeAction:
            if currentTime - self.spawnTime >= 1000:
                self.canTakeAction = True

    def getStatus(self):
        """Pick the animation state from current flags (priority order)."""
        if not self.alive:
            self.status = 'death'
        elif self.teleporting:
            self.status = 'skill'
        elif self.specialAttack:
            self.status = 'summon'
        else:
            self.status = 'idle'

    def createSpecialAttackHitbox(self):
        """Spawn four homing summons at the arena corners, aimed at the player."""
        self.specialAttack = True
        self.specialAttackTime = pygame.time.get_ticks()
        # Fixed spawn points; each summon targets the player's current position.
        summon1 = BossSummon((160, 176), self.level.player.sprite.hitbox.center)
        summon2 = BossSummon((160, 304), self.level.player.sprite.hitbox.center)
        summon3 = BossSummon((656, 176), self.level.player.sprite.hitbox.center)
        summon4 = BossSummon((656, 304), self.level.player.sprite.hitbox.center)
        self.specialAttackHitboxes.add(summon1)
        self.specialAttackHitboxes.add(summon2)
        self.specialAttackHitboxes.add(summon3)
        self.specialAttackHitboxes.add(summon4)

    def loseHP(self, damage):
        """Apply *damage* unless i-frames are active; flag death at 0 HP."""
        if not self.hasIFrames and self.alive:
            self.hit = True
            self.hasIFrames = True
            self.hitTime = pygame.time.get_ticks()
            self.hp -= damage
            ENEMYHITSOUND.play(0)
            if self.hp <= 0:
                self.alive = False

    def changePosition(self):
        """Teleport to a random arena slot (skipping the current one) and face inward."""
        if self.canTeleport:
            BOSSTELEPORTINGSOUND.play(0)
            self.teleportTime = pygame.time.get_ticks()
            self.teleporting = True
            newPosIndex = randint(0,4)
            # Avoid re-picking the current slot by stepping to the next index.
            if self.location == bossPositions[newPosIndex]:
                if newPosIndex == 4:
                    newPosIndex = 0
                else:
                    newPosIndex += 1
            # Slots 0, 2 and 3 are on the left side, so face right there.
            if newPosIndex in (0,3,2):
                self.facingRight = True
            else:
                self.facingRight = False
            self.rect.center = bossPositions[newPosIndex]

    def drawing(self, scrolling):
        """Blit the boss at its world position offset by the camera scroll."""
        x = int(self.rect.x - scrolling.x)
        y = int(self.rect.y - scrolling.y)
        self.displaySurface.blit(self.image, (x, y))

    def update(self):
        """Per-frame tick: status, animation, timers, then actions."""
        if not self.defeated:
            self.getStatus()
            self.animate()
            self.cooldowns()
            if self.canTakeAction:
                self.changePosition()
                if self.canSpecialAttack:
                    self.createSpecialAttackHitbox()
class BossSummon(pygame.sprite.Sprite):
    """Homing projectile spawned by the boss's summon attack.

    Sits idle for ``startMove`` ms, then travels in a straight line
    toward the position the player had when it was spawned.
    """

    def __init__(self, spawnPosition, target):
        super().__init__()
        self.frameIndex = 0
        self.animationSpeed = 0.15
        self.status = 'idle'
        self.importSpellAssets()
        self.image = self.animations['idle'][self.frameIndex]
        self.rect = self.image.get_rect(center = spawnPosition)
        self.hitbox = pygame.Rect(0, 0, 11, 23)
        # Target is captured once at spawn time; the projectile does not track.
        self.target = target
        self.spawnPosition = spawnPosition
        self.spawnTime = pygame.time.get_ticks()
        self.startMove = 1000   # ms of idle before moving
        self.endLife = 5000     # intended lifetime in ms
        self.speed = 4          # pixels per frame along the travel vector
        # Face the direction of travel for sprite flipping.
        if self.target[0] - spawnPosition[0] > 0:
            self.goingRight = False
        else:
            self.goingRight = True

    def importSpellAssets(self):
        """Load animation frames from Assets/enemies/bossSummon/<state>."""
        characterPath = path.join('Assets','enemies','bossSummon')
        self.animations = {'idle':[],'moving':[]}
        for animation in self.animations.keys():
            fullPath = path.join(characterPath,animation)
            self.animations[animation] = importFolder(fullPath)

    def animate(self):
        """Advance and loop the current animation, flipping when going left."""
        animation = self.animations[self.status]
        # loop over frame index
        self.frameIndex += self.animationSpeed
        if self.frameIndex >= len(animation):
            self.frameIndex = 0
        image = animation[int(self.frameIndex)]
        if self.goingRight:
            self.image = image
        else:
            flippedImage = pygame.transform.flip(image, True, False)
            self.image = flippedImage
        self.hitbox.center = self.rect.center

    def moveTowardTarget(self):
        """Step ``speed`` pixels along the unit vector from spawn to target."""
        projectileVector = pygame.math.Vector2(self.spawnPosition)
        targetVector = pygame.math.Vector2(self.target)
        towards = (targetVector - projectileVector).normalize() * self.speed
        self.rect.x += towards.x
        self.rect.y += towards.y

    def drawing(self, surface, scrolling):
        """Blit the projectile at its world position offset by the camera scroll."""
        x = int(self.rect.x - scrolling.x)
        y = int(self.rect.y - scrolling.y)
        surface.blit(self.image, (x, y))

    def update(self):
        """Per-frame tick: animate, then start moving after the idle delay."""
        self.animate()
        currentTime = pygame.time.get_ticks()
        # NOTE(review): because startMove (1000) < endLife (5000), the first
        # branch is always taken once the idle delay passes, so the elif
        # kill() below can never run -- the projectile never expires.
        if currentTime - self.spawnTime >= self.startMove:
            self.status = 'moving'
            self.moveTowardTarget()
        elif currentTime - self.spawnTime >= self.endLife:
            self.kill()
{
"api_name": "pygame.sprite",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.time.get_ticks",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.time",
... |
37326026549 | from zipline.pipeline.factors import CustomFactor
from zipline.pipeline.data import USEquityPricing
import numpy as np
import warnings
def recurs_sum(arr):
    """Return the cumulative sum of ``arr`` along its first axis.

    Vectorized replacement for the original element-by-element recurrence
    ``out[i] = out[i-1] + arr[i]``. dtype is pinned to float64 to match
    the previous implementation (which accumulated into ``np.zeros``),
    and empty input now yields an empty array instead of an IndexError.
    """
    return np.cumsum(arr, axis=0, dtype=np.float64)
class AD(CustomFactor):
    """
    Chaikin Accumulation Distribution Oscillator
    Volume indicator
    **Default Inputs:** USEquityPricing.high, USEquityPricing.low, USEquitypricing.close, USEquityPricing.volume
    **Default Window Length:** 14
    http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:accumulation_distribution_line
    """
    inputs = [USEquityPricing.close, USEquityPricing.high, USEquityPricing.low, USEquityPricing.volume]
    # 14-day AD window plus one extra row so "yesterday" can be computed too.
    window_length = 14 + 1
    outputs = 'cho_yesterday', 'AD_yesterday', 'cho_today', 'AD_today'

    def compute(self, today, assets, out, close, high, low, vol):
        # ignore unimportant nan warnings (flat days make high == low below)
        warnings.filterwarnings('ignore')
        # close location value: where close sits within the day's range [-1, 1]
        clv = ((close - low) - (high - close)) / (high - low)
        ad = clv * vol
        # today: cumulative AD over the most recent 14 rows; the Chaikin
        # oscillator is the 3-day SMA minus the 10-day SMA of that line.
        ad_revised = recurs_sum(ad[-14:])
        sma_3d = np.nanmean(ad_revised[-3:], axis=0)
        sma_10d = np.nanmean(ad_revised[-10:], axis=0)
        cho_today = sma_3d - sma_10d
        out.cho_today[:] = cho_today
        out.AD_today[:] = ad_revised[-1]
        # yesterday: same computation shifted back one row (first 14 rows)
        ad_revised = recurs_sum(ad[:14])
        sma_3d = np.nanmean(ad_revised[-3:], axis=0)
        sma_10d = np.nanmean(ad_revised[-10:], axis=0)
        cho_yesterday = sma_3d - sma_10d
        out.cho_yesterday[:] = cho_yesterday
        out.AD_yesterday[:] = ad_revised[-1]
| ahmad-emanuel/quant_trading_system | Indicators/chaikin_oscilator.py | chaikin_oscilator.py | py | 1,745 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "zipline.pipeline.factors.CustomFactor",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "zipline.pipeline.data.USEquityPricing.close",
"line_number": 29,
"usage_type": "attribut... |
40926602467 | from discord.ext.commands import bot, has_permissions
import discord.ext
from discord.ext import commands
from config import *
import asyncio
import random
# noinspection PyPackageRequirements
intents = discord.Intents.default()
# Privileged intent: required so the bot can read message text contents.
intents.message_content = True
bot = commands.Bot(command_prefix=PREFIX, intents=intents, description="Test bot for discord.py")
# Event for when the bot is ready and connected to Discord
@bot.event
async def on_ready():
    """Log readiness and post a heartbeat message to the status channel."""
    print('Bot is online and connected to Discord')
    print('------')
    # Send a heartbeat message to a specific channel when the bot comes online
    channel = bot.get_channel(1015712143145959504)  # hard-coded status channel id
    await channel.send('Bot is online and ready!')
# Member join event notification
@bot.event
async def on_member_join(member):
    """Announce a new member with an embed in the status channel.

    discord.py dispatches ``on_member_join`` with only the Member object;
    the previous ``(member, ctx, guild)`` signature received arguments
    that are never supplied and crashed on every join. The guild is
    available from ``member.guild``.

    NOTE(review): a second ``on_member_join`` handler later in this file
    re-registers the event and replaces this one -- the two should be
    consolidated.
    """
    guild = member.guild
    welcomeEmbed = discord.Embed(
        title="A new member has joined!",
        description=f"{member.name} has joined the {guild.name} server!",
        color=discord.Color.blue(),
    )
    await bot.get_channel(1015712143145959504).send(embed=welcomeEmbed)
# Define the "hello" command
@bot.command()
async def hello(ctx):
await ctx.send(f"Hello, {ctx.author.name}!")
# Define the "Goodbye" command
@bot.command()
async def goodbye(ctx):
await ctx.send(f"Goodbye, {ctx.author.name}!")
@bot.command()
async def clear(ctx, amount=5):
    """Bulk-delete the last *amount* messages (default 5)."""
    # +1 so the invoking command message is removed as well.
    await ctx.channel.purge(limit=amount + 1)
    # Confirmation auto-deletes after 5 seconds.
    await ctx.send(f'{amount} messages cleared!', delete_after=5)
# Event: on_member_join
# NOTE(review): this re-registers on_member_join and overrides the embed
# handler defined earlier in this file; merge the two handlers.
@bot.event
async def on_member_join(member):
    # Send a welcome message asking the member to pick a location role.
    welcome_channel = discord.utils.get(member.guild.text_channels, name='general')  # Specify the channel
    welcome_message = f"Welcome {member.mention} to the server! Please choose a role from the following options: Cincinnati, Kentucky, Indiana"
    await welcome_channel.send(welcome_message)
# Define a server info command
@bot.event
async def on_message(message):
    """Reply with an embed of basic guild stats when '!serverinfo' is sent.

    discord.py dispatches ``on_message`` with only the Message object; the
    previous ``(message, ctx)`` signature raised a TypeError on every
    message. The guild and reply channel come from the message itself.

    NOTE(review): a second ``on_message`` handler later in this file
    re-registers the event and replaces this one -- merge the handlers.
    """
    if message.author == bot.user:
        return  # ignore the bot's own messages
    if message.content.startswith('!serverinfo'):
        server = message.guild
        server_name = server.name
        server_id = server.id
        server_owner = server.owner
        # Guild.region was removed in discord.py 2.x; fall back gracefully.
        server_region = getattr(server, 'region', 'unknown')
        server_member_count = server.member_count
        # Create an embed to display the server information
        embed = discord.Embed(title='Server Info', color=discord.Color.green())
        embed.add_field(name='Server Name', value=server_name, inline=False)
        embed.add_field(name='Server ID', value=server_id, inline=False)
        embed.add_field(name='Server Owner', value=server_owner, inline=False)
        embed.add_field(name='Server Region', value=server_region, inline=False)
        embed.add_field(name='Member Count', value=server_member_count, inline=False)
        # Send the embed back to the channel the command came from
        await message.channel.send(embed=embed)
# Slow mode feature
@bot.event
async def on_message(message):
    """Throttle users who send messages too quickly (simple slow mode).

    Counts the author's messages among the last ``max_messages`` in the
    channel; at the threshold, warns the user and pauses. Commands are
    still forwarded via ``bot.process_commands``.
    """
    if message.author.bot:
        return  # Ignore messages from bots

    # Implement slow mode
    slowmode_seconds = 5  # Slow mode duration in seconds
    max_messages = 3  # Messages allowed within the slow mode window
    # BUG FIX: the counter previously started at 10 (and reset to 10),
    # which is already past the threshold of 3, so slow mode fired on the
    # very first repeat message. Start -- and reset -- at 0 instead.
    recent_count = 0
    async for prev_message in message.channel.history(limit=max_messages, before=message):
        if prev_message.author == message.author:
            # Check if the user has sent enough messages to trigger slow mode
            recent_count += 1
            if recent_count >= max_messages:
                await message.channel.send(
                    f'{message.author.mention}, you are sending messages too quickly. '
                    f'Please wait {slowmode_seconds} seconds before sending another message.'
                )
                await asyncio.sleep(slowmode_seconds)
                recent_count = 0
                break
    await bot.process_commands(message)
# Kick command
@bot.command(name="kick", pass_context=True)
@has_permissions(manage_roles=True, ban_members=True)
async def kick(ctx, member: discord.Member, *, reason=None):
    """Kick |member| from the guild, recording an optional reason."""
    await member.kick(reason=reason)
    confirmation = f'{member.name} has been kicked.'
    await ctx.send(confirmation)
# Ban command
@bot.command(name="ban", pass_context=True)
@has_permissions(ban_members=True)
async def ban(ctx, member: discord.Member, *, reason=None):
    """Ban |member| from the guild, recording an optional reason."""
    await member.ban(reason=reason)
    confirmation = f'{member.name} has been banned.'
    await ctx.send(confirmation)
# Unban command
@bot.command()
@has_permissions(ban_members=True)
async def unban(ctx, member_id):
    """Unban a user by their numeric ID.

    BUG FIX: guard against an unknown ID -- discord.utils.get returns
    None when no ban entry matches, and the original then crashed on
    `member.user`.
    """
    # NOTE(review): in discord.py 2.x Guild.bans() is an async iterator
    # rather than an awaitable list; confirm the library version in use.
    banned_users = await ctx.guild.bans()
    member = discord.utils.get(banned_users, user__id=int(member_id))
    if member is None:
        await ctx.send(f'No banned user found with ID {member_id}.')
        return
    await ctx.guild.unban(member.user)
    await ctx.send(f'{member.user.name} has been unbanned.')
# Define the mute and unmute commands
@bot.command()
async def mute(ctx, member: discord.Member):
    """Silence |member| via a "Muted" role, creating the role if needed."""
    # Both the invoker and the bot need the "manage_roles" permission.
    if not ctx.author.guild_permissions.manage_roles:
        await ctx.send("You don't have permission to mute members.")
        return
    if not ctx.me.guild_permissions.manage_roles:
        await ctx.send("I don't have permission to mute members.")
        return
    muted_role = discord.utils.get(ctx.guild.roles, name="Muted")
    if muted_role is None:
        # First use on this server: create the role and deny it the
        # ability to send messages in every channel.
        muted_role = await ctx.guild.create_role(name="Muted", reason="Mute command")
        for channel in ctx.guild.channels:
            await channel.set_permissions(muted_role, send_messages=False)
    await member.add_roles(muted_role)
    await ctx.send(f"{member.display_name} has been muted.")
@bot.command()
async def unmute(ctx, member: discord.Member):
    """Remove the "Muted" role from |member| if it exists."""
    # Both the invoker and the bot need the "manage_roles" permission.
    if not ctx.author.guild_permissions.manage_roles:
        await ctx.send("You don't have permission to unmute members.")
        return
    if not ctx.me.guild_permissions.manage_roles:
        await ctx.send("I don't have permission to unmute members.")
        return
    muted_role = discord.utils.get(ctx.guild.roles, name="Muted")
    if muted_role is None:
        # Nothing to remove on this server.
        await ctx.send("The 'Muted' role doesn't exist.")
        return
    await member.remove_roles(muted_role)
    await ctx.send(f"{member.display_name} has been unmuted.")
# Tictactoe
@bot.event
async def on_message(message):
    # NOTE(review): several @bot.event on_message handlers are defined in
    # this file; each re-binds the event, so only the last one is active.
    if message.author == bot.user:
        return
    if message.content.startswith('!tictactoe'):
        # The challenger is the author; the opponent must be @mentioned.
        player1 = message.author
        player2 = message.mentions[0] if message.mentions else None
        if not player2:
            await message.channel.send('Please mention another player to play with.')
            return
        if player1 == player2:
            await message.channel.send('You cannot play against yourself!')
            return
        # 3x3 board stored as a flat list of emoji strings; the white
        # square emoji marks an empty cell.
        board = [':white_large_square:', ':white_large_square:', ':white_large_square:',
                 ':white_large_square:', ':white_large_square:', ':white_large_square:',
                 ':white_large_square:', ':white_large_square:', ':white_large_square:']
        winner = None
        turn = player1
        game_over = False
        while not game_over:
            # Player 1 places X; player 2 places O.
            if turn == player1:
                sign = ':regional_indicator_x:'
            else:
                sign = ':o:'
            await message.channel.send(f'{turn.mention}\'s turn. Choose a position to place {sign} in.')
            def check(m):
                # Accept only a digit 1-9 from the current player that
                # targets a still-empty square.
                return m.author == turn and m.content.isdigit() and int(m.content) in range(1, 10) and board[
                    int(m.content) - 1] == ':white_large_square:'
            try:
                msg = await bot.wait_for('message', check=check, timeout=30.0)
            except asyncio.TimeoutError:
                # No valid move within 30s: abandon the game.
                await message.channel.send(f'{turn.mention} took too long to respond. Game ended.')
                game_over = True
                break
            # Squares are 1-indexed for players, 0-indexed internally.
            position = int(msg.content) - 1
            board[position] = sign
            await print_board(message.channel, board)
            if check_win(board):
                winner = turn
                game_over = True
            elif check_draw(board):
                game_over = True
            else:
                # Alternate turns.
                turn = player2 if turn == player1 else player1
        if winner:
            await message.channel.send(f'Congratulations, {winner.mention}! You won!')
        else:
            await message.channel.send('It\'s a draw!')
async def print_board(channel, board):
    """Send the board to |channel| as one message, three cells per row."""
    rendered = ''
    for idx, cell in enumerate(board):
        rendered += cell
        # Break the line after every third cell.
        if idx % 3 == 2:
            rendered += '\n'
    await channel.send(rendered)
def check_win(board):
    """Return True if any row, column, or diagonal holds three equal marks."""
    triplets = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
        (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
        (0, 4, 8), (2, 4, 6),              # diagonals
    )
    empty = ':white_large_square:'
    return any(
        board[a] == board[b] == board[c] != empty
        for a, b, c in triplets
    )
def check_draw(board):
    """Return True when no empty (white square) cells remain."""
    return ':white_large_square:' not in board
# Define a class for the blackjack game
class Blackjack:
    """A single-player blackjack game played through Discord messages."""

    def __init__(self, ctx):
        self.ctx = ctx
        # Four copies of each rank; suits are irrelevant to scoring.
        self.deck = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A'] * 4
        self.player_hand = []
        self.dealer_hand = []
        self.player_score = 0
        self.dealer_score = 0

    def deal(self):
        """Deal two cards each to the player and the dealer."""
        for _ in range(2):
            card = random.choice(self.deck)
            self.player_hand.append(card)
            self.deck.remove(card)
            card = random.choice(self.deck)
            self.dealer_hand.append(card)
            self.deck.remove(card)

    def hit(self, hand):
        """Draw one card from the deck into |hand|."""
        card = random.choice(self.deck)
        hand.append(card)
        self.deck.remove(card)

    def calculate_score(self, hand):
        """Return the blackjack value of |hand|.

        Aces count as 11 but are demoted to 1 one at a time while the
        hand is bust.

        BUG FIX: aces previously contributed nothing to the score (the
        'A' branch only counted them), so e.g. A+K scored 10 instead of
        21. Count each ace as 11 up front, then demote as needed.
        """
        score = 0
        num_aces = 0
        for card in hand:
            if card == 'A':
                score += 11
                num_aces += 1
            elif card in ['J', 'Q', 'K']:
                score += 10
            else:
                score += int(card)
        while num_aces > 0 and score > 21:
            score -= 10
            num_aces -= 1
        return score

    async def play(self):
        """Run one interactive round in the invoking channel."""
        self.deal()
        await self.ctx.send(f"**Your hand:** {', '.join(self.player_hand)}")
        await self.ctx.send(f"**Dealer's up card:** {self.dealer_hand[0]}")
        while True:
            await self.ctx.send("Do you want to hit or stand? (Type `hit` or `stand`)")
            try:
                msg = await bot.wait_for('message', check=lambda message: message.author == self.ctx.author,
                                         timeout=30)
                if msg.content.lower() == 'hit':
                    self.hit(self.player_hand)
                    await self.ctx.send(f"**Your hand:** {', '.join(self.player_hand)}")
                    self.player_score = self.calculate_score(self.player_hand)
                    if self.player_score > 21:
                        await self.ctx.send("Bust! You lost!")
                        break
                elif msg.content.lower() == 'stand':
                    # Dealer draws to 17, then the higher total wins.
                    while self.calculate_score(self.dealer_hand) < 17:
                        self.hit(self.dealer_hand)
                    self.dealer_score = self.calculate_score(self.dealer_hand)
                    await self.ctx.send(f"**Dealer's hand:** {', '.join(self.dealer_hand)}")
                    if self.dealer_score > 21:
                        await self.ctx.send("Dealer busts! You win!")
                    elif self.dealer_score < self.player_score:
                        await self.ctx.send("You win!")
                    elif self.dealer_score > self.player_score:
                        await self.ctx.send("You lost!")
                    else:
                        await self.ctx.send("It's a tie!")
                    break
                else:
                    await self.ctx.send("Invalid choice! Type `hit` or `stand`.")
            except asyncio.TimeoutError:
                await self.ctx.send("Timeout! You took too long to respond.")
                break
# Define a command for playing blackjack
@bot.command()
async def blackjack(ctx):
    """Start a blackjack round for the invoking user."""
    await Blackjack(ctx).play()
# Define the slots emojis
SLOTS_EMOJIS = ['🍇', '🍊', '🍒', '🍓', '🍌', '🍏', '🍆']
@bot.event
async def on_message(message):
    """Run the slot machine when a (non-bot) user types !slots."""
    if message.author.bot:
        return
    if not message.content.startswith('!slots'):
        return
    # Spin and report the outcome in the same channel.
    await message.channel.send(spin_slots())
def spin_slots():
    """Spin three reels; all-matching emojis win, anything else loses."""
    reels = [random.choice(SLOTS_EMOJIS) for _ in range(3)]
    display = f'{reels[0]} {reels[1]} {reels[2]}'
    # Three identical emojis is the only winning combination.
    if reels[0] == reels[1] == reels[2]:
        return f'{display}\nYou win!'
    return f'{display}\nYou lose.'
@bot.command()
async def roulette(ctx, bet: int, guess: str):
    """Spin a three-color roulette wheel and settle |bet| on |guess|."""
    guess = guess.lower()  # case-insensitive comparison
    valid_guesses = ['red', 'black', 'green']
    if guess not in valid_guesses:
        await ctx.send('Invalid guess. Please choose from red, black, or green.')
        return
    if bet <= 0:
        await ctx.send('Invalid bet. Please place a bet greater than 0.')
        return
    # Simulate the roulette spin.
    spin = random.choice(valid_guesses)
    # Green pays 14x; red/black pay 2x.
    win_amount = bet * 14 if spin == 'green' else bet * 2
    if guess == spin:
        await ctx.send(f'Congratulations! The spin is **{spin}**. You win {win_amount} credits!')
    else:
        await ctx.send(f'Unlucky! The spin is **{spin}**. You lose {bet} credits.')
@bot.event
async def on_message(message):
    """Play rock-paper-scissors against the bot via `!rps <choice>`."""
    if message.author.bot:
        return
    if not message.content.startswith('!rps'):
        return
    # Expect exactly: the command plus the player's choice.
    parts = message.content.split()
    if len(parts) != 2:
        await message.channel.send('Usage: !rps <rock/paper/scissors>')
        return
    user_choice = parts[1].lower()
    choices = ['rock', 'paper', 'scissors']
    bot_choice = random.choice(choices)
    if user_choice not in choices:
        await message.channel.send('Invalid choice. Choose either rock, paper, or scissors.')
        return
    # Each key beats the choice it maps to.
    beats = {'rock': 'scissors', 'paper': 'rock', 'scissors': 'paper'}
    if user_choice == bot_choice:
        await message.channel.send(f'Tie! Both chose {user_choice}.')
    elif beats[user_choice] == bot_choice:
        await message.channel.send(f'You won! You chose {user_choice} and the bot chose {bot_choice}.')
    else:
        await message.channel.send(f'You lost! You chose {user_choice} and the bot chose {bot_choice}.')
bot.run(TOKEN, reconnect=True)
| Juilfsjc/SeniorDesign | main.py | main.py | py | 16,345 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "discord.ext.commands.Intents.default",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Intents",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 9,
"usage_type": "name"
},
... |
18252823651 | import bisect
from typing import List
class Solution:
    def maxEnvelopes(self, envelopes: List[List[int]]) -> int:
        """Longest chain of strictly-nestable envelopes (Russian doll).

        Sort widths ascending with heights descending for equal widths,
        then take the longest strictly increasing subsequence of heights
        via patience sorting (bisect on pile tops).
        """
        ordered = sorted(envelopes, key=lambda e: (e[0], -e[1]))
        tails = []
        for envelope in ordered:
            height = envelope[1]
            pos = bisect.bisect_left(tails, height)
            if pos < len(tails):
                # Replace the smallest tail >= height.
                tails[pos] = height
            else:
                tails.append(height)
        return len(tails)
solution = Solution()
envelopes = [[5,4],[6,4],[6,7],[2,3]]
assert solution.maxEnvelopes(envelopes) == 3, "Should be 3" | hujienan/Jet-Algorithm | leetcode/354. Russian Doll Envelopes/index.py | index.py | py | 554 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "bisect.bisect_left",
"line_number": 11,
"usage_type": "call"
}
] |
10840945598 | import os
import requests
from bs4 import BeautifulSoup
#os.system("clear")
def crawl():
    """Scrape iban.com's currency table.

    Returns:
        Dict mapping a 0-based index to [country, currency, code, number],
        skipping rows marked "No universal currency".
    """
    url = "https://www.iban.com/currency-codes"
    page = requests.get(url)
    soup = BeautifulSoup(page.text, "html.parser")
    table = soup.find("table", {"class": "table table-bordered downloads tablesorter"})
    cells = table.find("tbody").find_all("td")
    information = {}
    index = 0
    # Each table row spans four consecutive <td> cells.
    for i in range(0, len(cells), 4):
        if cells[i + 1].string == "No universal currency":
            continue
        information[index] = [
            cells[i].string.capitalize(),
            cells[i + 1].string.capitalize(),
            cells[i + 2].string,
            cells[i + 3].string,
        ]
        index += 1
    return information
def main():
    """Interactively pick a country and print its currency code."""
    country_dic = crawl()
    print("Hello! Please choose select a country by number: ")
    for i in range(len(country_dic)):
        print('# {} {}'.format(i, country_dic[i][0]))
    while True:
        try:
            country_num = int(input("#: "))
        # BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; only non-numeric input should
        # trigger a retry.
        except ValueError:
            print("That wasn't a number.")
            continue
        if country_num >= len(country_dic):
            print("Choose a number from the list.")
            continue
        break
    print(f"You choose {country_dic[country_num][0]}")
    print(f"The currency code is {country_dic[country_num][2]}")
main()
| cheonjiwan/python_challenge | assignment/Day5.py | Day5.py | py | 1,568 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 12,
"usage_type": "call"
}
] |
15744786007 | import errno
import filecmp
import glob
import os
import platform
import random
import re
import shutil
import stat
import subprocess
import sys
import tarfile
import tempfile
import time
from typing import List, NamedTuple
import urllib.parse
from color import Coloring
from error import DownloadError
from error import GitError
from error import ManifestInvalidPathError
from error import ManifestInvalidRevisionError
from error import ManifestParseError
from error import NoManifestException
from error import RepoError
from error import UploadError
import fetch
from git_command import git_require
from git_command import GitCommand
from git_config import GetSchemeFromUrl
from git_config import GetUrlCookieFile
from git_config import GitConfig
from git_config import IsId
from git_refs import GitRefs
from git_refs import HEAD
from git_refs import R_HEADS
from git_refs import R_M
from git_refs import R_PUB
from git_refs import R_TAGS
from git_refs import R_WORKTREE_M
import git_superproject
from git_trace2_event_log import EventLog
import platform_utils
import progress
from repo_logging import RepoLogger
from repo_trace import Trace
logger = RepoLogger(__file__)
class SyncNetworkHalfResult(NamedTuple):
    """Sync_NetworkHalf return value."""

    # Did we query the remote? False when optimized_fetch is True and we
    # have the commit already present.
    remote_fetched: bool
    # Error from SyncNetworkHalf
    error: Exception = None

    @property
    def success(self) -> bool:
        """True when no error was recorded."""
        had_error = bool(self.error)
        return not had_error
class SyncNetworkHalfError(RepoError):
    """Failure trying to sync."""
    # NOTE(review): presumably raised by the network (fetch) half of a
    # sync, per the name -- confirm at raise sites later in this file.


class DeleteWorktreeError(RepoError):
    """Failure to delete worktree."""

    def __init__(
        self, *args, aggregate_errors: List[Exception] = None, **kwargs
    ) -> None:
        """Initialize.

        Args:
            aggregate_errors: Underlying exceptions collected during the
                delete; stored as self.aggregate_errors ([] when omitted).
        """
        super().__init__(*args, **kwargs)
        self.aggregate_errors = aggregate_errors or []


class DeleteDirtyWorktreeError(DeleteWorktreeError):
    """Failure to delete worktree due to uncommitted changes."""
# Maximum sleep time allowed during retries.
MAXIMUM_RETRY_SLEEP_SEC = 3600.0
# +-10% random jitter is added to each Fetches retry sleep duration.
RETRY_JITTER_PERCENT = 0.1
# Whether to use alternates. Switching back and forth is *NOT* supported.
# TODO(vapier): Remove knob once behavior is verified.
_ALTERNATES = os.environ.get("REPO_USE_ALTERNATES") == "1"
def _lwrite(path, content):
    """Write |content| to |path| via a sidecar lock file plus rename."""
    tmp = f"{path}.lock"
    # Maintain Unix line endings on all OS's to match git behavior.
    with open(tmp, "w", newline="\n") as f:
        f.write(content)
    try:
        platform_utils.rename(tmp, path)
    except OSError:
        # Clean up the lock file before propagating the failure.
        platform_utils.remove(tmp)
        raise
def not_rev(r):
    """Prefix |r| with '^' so rev-list excludes it."""
    return f"^{r}"
def sq(r):
    """Single-quote |r|, tripling any embedded single quotes."""
    escaped = r.replace("'", "'''")
    return f"'{escaped}'"
_project_hook_list = None


def _ProjectHooks():
    """List the hooks present in the 'hooks' directory.

    These hooks are project hooks and are copied to the '.git/hooks'
    directory of all subprojects.

    The list is computed once (from the contents of the 'repo/hooks'
    directory next to this file) and cached for later calls.

    Returns:
        A list of absolute paths to all of the files in the hooks
        directory.
    """
    global _project_hook_list
    if _project_hook_list is None:
        hooks_dir = platform_utils.realpath(
            os.path.abspath(os.path.dirname(__file__))
        )
        hooks_dir = os.path.join(hooks_dir, "hooks")
        _project_hook_list = [
            os.path.join(hooks_dir, name)
            for name in platform_utils.listdir(hooks_dir)
        ]
    return _project_hook_list
class DownloadedChange:
    """A change downloaded into a project, spanning base..commit."""

    # Lazily-populated result of the rev_list in `commits`.
    _commit_cache = None

    def __init__(self, project, base, change_id, ps_id, commit):
        self.project = project
        self.base = base
        self.change_id = change_id
        self.ps_id = ps_id
        self.commit = commit

    @property
    def commits(self):
        """One-line descriptions of commits in base..commit, oldest first."""
        if self._commit_cache is None:
            self._commit_cache = self.project.bare_git.rev_list(
                "--abbrev=8",
                "--abbrev-commit",
                "--pretty=oneline",
                "--reverse",
                "--date-order",
                not_rev(self.base),
                self.commit,
                "--",
            )
        return self._commit_cache
class ReviewableBranch:
    """A local branch together with the upstream base it diverged from."""

    # Lazily-populated caches for `commits` and `base_exists`.
    _commit_cache = None
    _base_exists = None

    def __init__(self, project, branch, base):
        self.project = project
        self.branch = branch
        self.base = base

    @property
    def name(self):
        """Short branch name (used below without a refs/heads/ prefix)."""
        return self.branch.name

    @property
    def commits(self):
        """One-line descriptions of commits on base..branch, oldest first."""
        if self._commit_cache is None:
            args = (
                "--abbrev=8",
                "--abbrev-commit",
                "--pretty=oneline",
                "--reverse",
                "--date-order",
                not_rev(self.base),
                R_HEADS + self.name,
                "--",
            )
            try:
                self._commit_cache = self.project.bare_git.rev_list(
                    *args, log_as_error=self.base_exists
                )
            except GitError:
                # We weren't able to probe the commits for this branch. Was it
                # tracking a branch that no longer exists? If so, return no
                # commits. Otherwise, rethrow the error as we don't know what's
                # going on.
                if self.base_exists:
                    raise

                self._commit_cache = []

        return self._commit_cache

    @property
    def unabbrev_commits(self):
        """Map of abbreviated (8-char) commit id -> full commit id."""
        r = dict()
        for commit in self.project.bare_git.rev_list(
            not_rev(self.base), R_HEADS + self.name, "--"
        ):
            r[commit[0:8]] = commit
        return r

    @property
    def date(self):
        """Committer date (%cd) of the newest commit on the branch."""
        return self.project.bare_git.log(
            "--pretty=format:%cd", "-n", "1", R_HEADS + self.name, "--"
        )

    @property
    def base_exists(self):
        """Whether the branch we're tracking exists.

        Normally it should, but sometimes branches we track can get deleted.
        """
        if self._base_exists is None:
            try:
                self.project.bare_git.rev_parse("--verify", not_rev(self.base))
                # If we're still here, the base branch exists.
                self._base_exists = True
            except GitError:
                # If we failed to verify, the base branch doesn't exist.
                self._base_exists = False

        return self._base_exists

    def UploadForReview(
        self,
        people,
        dryrun=False,
        auto_topic=False,
        hashtags=(),
        labels=(),
        private=False,
        notify=None,
        wip=False,
        ready=False,
        dest_branch=None,
        validate_certs=True,
        push_options=None,
    ):
        """Upload this branch for review (delegates to the project)."""
        self.project.UploadForReview(
            branch=self.name,
            people=people,
            dryrun=dryrun,
            auto_topic=auto_topic,
            hashtags=hashtags,
            labels=labels,
            private=private,
            notify=notify,
            wip=wip,
            ready=ready,
            dest_branch=dest_branch,
            validate_certs=validate_certs,
            push_options=push_options,
        )

    def GetPublishedRefs(self):
        """Map of sha -> ref for refs/changes/* on the review remote."""
        refs = {}
        output = self.project.bare_git.ls_remote(
            self.branch.remote.SshReviewUrl(self.project.UserEmail),
            "refs/changes/*",
        )
        for line in output.split("\n"):
            try:
                (sha, ref) = line.split()
                refs[sha] = ref
            except ValueError:
                # Skip blank or otherwise unsplittable lines.
                pass
        return refs
class StatusColoring(Coloring):
    """Colored printers for the 'status' configuration section."""

    def __init__(self, config):
        super().__init__(config, "status")
        # Bold headers for project and branch lines; colored printers for
        # the remaining status categories.
        self.project = self.printer("header", attr="bold")
        self.branch = self.printer("header", attr="bold")
        self.nobranch = self.printer("nobranch", fg="red")
        self.important = self.printer("important", fg="red")
        self.added = self.printer("added", fg="green")
        self.changed = self.printer("changed", fg="red")
        self.untracked = self.printer("untracked", fg="red")
class DiffColoring(Coloring):
    """Colored printers for the 'diff' configuration section."""

    def __init__(self, config):
        super().__init__(config, "diff")
        self.project = self.printer("header", attr="bold")
        self.fail = self.printer("fail", fg="red")
class Annotation:
    """A (name, value, keep) annotation, comparable and sortable."""

    def __init__(self, name, value, keep):
        self.name = name
        self.value = value
        self.keep = keep

    def __eq__(self, other):
        # Only equal to another Annotation with identical attributes.
        if not isinstance(other, Annotation):
            return False
        return self.__dict__ == other.__dict__

    def __lt__(self, other):
        # This exists just so that lists of Annotation objects can be sorted,
        # for use in comparisons. Ordering is by name, then value, then keep.
        if not isinstance(other, Annotation):
            raise ValueError("comparison is not between two Annotation objects")
        return (self.name, self.value, self.keep) < (
            other.name,
            other.value,
            other.keep,
        )
def _SafeExpandPath(base, subpath, skipfinal=False):
    """Make sure |subpath| is completely safe under |base|.

    We make sure no intermediate symlinks are traversed, and that the final path
    is not a special file (e.g. not a socket or fifo).

    NB: We rely on a number of paths already being filtered out while parsing
    the manifest. See the validation logic in manifest_xml.py for more details.

    Args:
        base: Path the result must stay underneath.
        subpath: '/'-separated relative path to expand under |base|.
        skipfinal: If True, the final component is joined without being
            checked (the caller handles it itself).

    Returns:
        The expanded path |base|/|subpath|.

    Raises:
        ManifestInvalidPathError: If |subpath| contains '.' or '..',
            crosses a symlink, or an existing component is not a regular
            file or directory.
    """
    # Split up the path by its components. We can't use os.path.sep exclusively
    # as some platforms (like Windows) will convert / to \ and that bypasses all
    # our constructed logic here. Especially since manifest authors only use
    # / in their paths.
    resep = re.compile(r"[/%s]" % re.escape(os.path.sep))
    components = resep.split(subpath)
    if skipfinal:
        # Whether the caller handles the final component itself.
        finalpart = components.pop()

    path = base
    for part in components:
        if part in {".", ".."}:
            raise ManifestInvalidPathError(
                '%s: "%s" not allowed in paths' % (subpath, part)
            )

        path = os.path.join(path, part)
        if platform_utils.islink(path):
            raise ManifestInvalidPathError(
                "%s: traversing symlinks not allow" % (path,)
            )

        if os.path.exists(path):
            if not os.path.isfile(path) and not platform_utils.isdir(path):
                raise ManifestInvalidPathError(
                    "%s: only regular files & directories allowed" % (path,)
                )

    if skipfinal:
        path = os.path.join(path, finalpart)

    return path
class _CopyFile:
    """Container for <copyfile> manifest element."""

    def __init__(self, git_worktree, src, topdir, dest):
        """Register a <copyfile> request.

        Args:
            git_worktree: Absolute path to the git project checkout.
            src: Relative path under |git_worktree| of file to read.
            topdir: Absolute path to the top of the repo client checkout.
            dest: Relative path under |topdir| of file to write.
        """
        self.git_worktree = git_worktree
        self.topdir = topdir
        self.src = src
        self.dest = dest

    def _Copy(self):
        """Copy src to dest if missing or stale, then make it read-only."""
        # Validate both endpoints before touching anything; directories
        # are rejected on either side.
        src = _SafeExpandPath(self.git_worktree, self.src)
        dest = _SafeExpandPath(self.topdir, self.dest)

        if platform_utils.isdir(src):
            raise ManifestInvalidPathError(
                "%s: copying from directory not supported" % (self.src,)
            )
        if platform_utils.isdir(dest):
            raise ManifestInvalidPathError(
                "%s: copying to directory not allowed" % (self.dest,)
            )

        # Copy file if it does not exist or is out of date.
        if not os.path.exists(dest) or not filecmp.cmp(src, dest):
            try:
                # Remove existing file first, since it might be read-only.
                if os.path.exists(dest):
                    platform_utils.remove(dest)
                else:
                    dest_dir = os.path.dirname(dest)
                    if not platform_utils.isdir(dest_dir):
                        os.makedirs(dest_dir)
                shutil.copy(src, dest)
                # Make the file read-only.
                mode = os.stat(dest)[stat.ST_MODE]
                mode = mode & ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
                os.chmod(dest, mode)
            except OSError:
                # Best effort: report the failure but keep going.
                logger.error("error: Cannot copy file %s to %s", src, dest)
class _LinkFile:
    """Container for <linkfile> manifest element."""

    def __init__(self, git_worktree, src, topdir, dest):
        """Register a <linkfile> request.

        Args:
            git_worktree: Absolute path to the git project checkout.
            src: Target of symlink relative to path under |git_worktree|.
            topdir: Absolute path to the top of the repo client checkout.
            dest: Relative path under |topdir| of symlink to create.
        """
        self.git_worktree = git_worktree
        self.topdir = topdir
        self.src = src
        self.dest = dest

    def __linkIt(self, relSrc, absDest):
        """Create (or refresh) the symlink |absDest| -> |relSrc|."""
        # Link file if it does not exist or is out of date.
        if not platform_utils.islink(absDest) or (
            platform_utils.readlink(absDest) != relSrc
        ):
            try:
                # Remove existing file first, since it might be read-only.
                if os.path.lexists(absDest):
                    platform_utils.remove(absDest)
                else:
                    dest_dir = os.path.dirname(absDest)
                    if not platform_utils.isdir(dest_dir):
                        os.makedirs(dest_dir)
                platform_utils.symlink(relSrc, absDest)
            except OSError:
                # Best effort: report the failure but keep going.
                logger.error(
                    "error: Cannot link file %s to %s", relSrc, absDest
                )

    def _Link(self):
        """Link the self.src & self.dest paths.

        Handles wild cards on the src linking all of the files in the source in
        to the destination directory.
        """
        # Some people use src="." to create stable links to projects. Let's
        # allow that but reject all other uses of "." to keep things simple.
        if self.src == ".":
            src = self.git_worktree
        else:
            src = _SafeExpandPath(self.git_worktree, self.src)

        if not glob.has_magic(src):
            # Entity does not contain a wild card so just a simple one to one
            # link operation.
            dest = _SafeExpandPath(self.topdir, self.dest, skipfinal=True)
            # dest & src are absolute paths at this point. Make sure the target
            # of the symlink is relative in the context of the repo client
            # checkout.
            relpath = os.path.relpath(src, os.path.dirname(dest))
            self.__linkIt(relpath, dest)
        else:
            dest = _SafeExpandPath(self.topdir, self.dest)
            # Entity contains a wild card.
            if os.path.exists(dest) and not platform_utils.isdir(dest):
                logger.error(
                    "Link error: src with wildcard, %s must be a directory",
                    dest,
                )
            else:
                for absSrcFile in glob.glob(src):
                    # Create a releative path from source dir to destination
                    # dir.
                    absSrcDir = os.path.dirname(absSrcFile)
                    relSrcDir = os.path.relpath(absSrcDir, dest)

                    # Get the source file name.
                    srcFile = os.path.basename(absSrcFile)

                    # Now form the final full paths to srcFile. They will be
                    # absolute for the desintaiton and relative for the source.
                    absDest = os.path.join(dest, srcFile)
                    relSrc = os.path.join(relSrcDir, srcFile)
                    self.__linkIt(relSrc, absDest)
class RemoteSpec:
    """Container for a project remote's properties (name, URLs, review)."""

    def __init__(
        self,
        name,
        url=None,
        pushUrl=None,
        review=None,
        revision=None,
        orig_name=None,
        fetchUrl=None,
    ):
        self.name = name
        self.url = url
        # Separate push URL; None means pushes use `url`-side defaults.
        self.pushUrl = pushUrl
        self.review = review
        self.revision = revision
        # Original remote name, when this spec was renamed/aliased.
        self.orig_name = orig_name
        self.fetchUrl = fetchUrl
class Project:
# These objects can be shared between several working trees.
@property
def shareable_dirs(self):
"""Return the shareable directories"""
if self.UseAlternates:
return ["hooks", "rr-cache"]
else:
return ["hooks", "objects", "rr-cache"]
    def __init__(
        self,
        manifest,
        name,
        remote,
        gitdir,
        objdir,
        worktree,
        relpath,
        revisionExpr,
        revisionId,
        rebase=True,
        groups=None,
        sync_c=False,
        sync_s=False,
        sync_tags=True,
        clone_depth=None,
        upstream=None,
        parent=None,
        use_git_worktrees=False,
        is_derived=False,
        dest_branch=None,
        optimized_fetch=False,
        retry_fetches=0,
        old_revision=None,
    ):
        """Init a Project object.

        Args:
            manifest: The XmlManifest object.
            name: The `name` attribute of manifest.xml's project element.
            remote: RemoteSpec object specifying its remote's properties.
            gitdir: Absolute path of git directory.
            objdir: Absolute path of directory to store git objects.
            worktree: Absolute path of git working tree.
            relpath: Relative path of git working tree to repo's top directory.
            revisionExpr: The `revision` attribute of manifest.xml's project
                element.
            revisionId: git commit id for checking out.
            rebase: The `rebase` attribute of manifest.xml's project element.
            groups: The `groups` attribute of manifest.xml's project element.
            sync_c: The `sync-c` attribute of manifest.xml's project element.
            sync_s: The `sync-s` attribute of manifest.xml's project element.
            sync_tags: The `sync-tags` attribute of manifest.xml's project
                element.
            clone_depth: The `clone-depth` attribute of manifest.xml's
                project element.
            upstream: The `upstream` attribute of manifest.xml's project
                element.
            parent: The parent Project object.
            use_git_worktrees: Whether to use `git worktree` for this project.
            is_derived: False if the project was explicitly defined in the
                manifest; True if the project is a discovered submodule.
            dest_branch: The branch to which to push changes for review by
                default.
            optimized_fetch: If True, when a project is set to a sha1 revision,
                only fetch from the remote if the sha1 is not present locally.
            retry_fetches: Retry remote fetches n times upon receiving transient
                error with exponential backoff and jitter.
            old_revision: saved git commit id for open GITC projects.
        """
        self.client = self.manifest = manifest
        self.name = name
        self.remote = remote
        self.UpdatePaths(relpath, worktree, gitdir, objdir)
        self.SetRevision(revisionExpr, revisionId=revisionId)

        self.rebase = rebase
        self.groups = groups
        self.sync_c = sync_c
        self.sync_s = sync_s
        self.sync_tags = sync_tags
        self.clone_depth = clone_depth
        self.upstream = upstream
        self.parent = parent
        # NB: Do not use this setting in __init__ to change behavior so that the
        # manifest.git checkout can inspect & change it after instantiating.
        # See the XmlManifest init code for more info.
        self.use_git_worktrees = use_git_worktrees
        self.is_derived = is_derived
        self.optimized_fetch = optimized_fetch
        # Clamp to non-negative.
        self.retry_fetches = max(0, retry_fetches)
        self.subprojects = []

        self.snapshots = {}
        self.copyfiles = []
        self.linkfiles = []
        self.annotations = []
        self.dest_branch = dest_branch
        self.old_revision = old_revision

        # This will be filled in if a project is later identified to be the
        # project containing repo hooks.
        self.enabled_repo_hooks = []
def RelPath(self, local=True):
"""Return the path for the project relative to a manifest.
Args:
local: a boolean, if True, the path is relative to the local
(sub)manifest. If false, the path is relative to the outermost
manifest.
"""
if local:
return self.relpath
return os.path.join(self.manifest.path_prefix, self.relpath)
def SetRevision(self, revisionExpr, revisionId=None):
"""Set revisionId based on revision expression and id"""
self.revisionExpr = revisionExpr
if revisionId is None and revisionExpr and IsId(revisionExpr):
self.revisionId = self.revisionExpr
else:
self.revisionId = revisionId
    def UpdatePaths(self, relpath, worktree, gitdir, objdir):
        """Update paths used by this project"""
        # Normalize to forward slashes for consistency across platforms.
        self.gitdir = gitdir.replace("\\", "/")
        self.objdir = objdir.replace("\\", "/")
        if worktree:
            self.worktree = os.path.normpath(worktree).replace("\\", "/")
        else:
            self.worktree = None
        self.relpath = relpath

        # Per-repository config layered over the global defaults.
        self.config = GitConfig.ForRepository(
            gitdir=self.gitdir, defaults=self.manifest.globalConfig
        )

        if self.worktree:
            self.work_git = self._GitGetByExec(
                self, bare=False, gitdir=self.gitdir
            )
        else:
            self.work_git = None
        # Wrappers for running git against the bare gitdir / objdir.
        self.bare_git = self._GitGetByExec(self, bare=True, gitdir=self.gitdir)
        self.bare_ref = GitRefs(self.gitdir)
        self.bare_objdir = self._GitGetByExec(
            self, bare=True, gitdir=self.objdir
        )
    @property
    def UseAlternates(self):
        """Whether git alternates are in use.

        This will be removed once migration to alternates is complete.
        """
        # Forced on via REPO_USE_ALTERNATES=1, and always on for
        # multi-manifest checkouts.
        return _ALTERNATES or self.manifest.is_multimanifest
    @property
    def Derived(self):
        """True when this project is a discovered submodule, not declared."""
        return self.is_derived
    @property
    def Exists(self):
        """True when both the git dir and object dir exist on disk."""
        return platform_utils.isdir(self.gitdir) and platform_utils.isdir(
            self.objdir
        )
@property
def CurrentBranch(self):
"""Obtain the name of the currently checked out branch.
The branch name omits the 'refs/heads/' prefix.
None is returned if the project is on a detached HEAD, or if the
work_git is otheriwse inaccessible (e.g. an incomplete sync).
"""
try:
b = self.work_git.GetHead()
except NoManifestException:
# If the local checkout is in a bad state, don't barf. Let the
# callers process this like the head is unreadable.
return None
if b.startswith(R_HEADS):
return b[len(R_HEADS) :]
return None
    def IsRebaseInProgress(self):
        """Whether rebase state files exist in the worktree's git dir."""
        # NOTE(review): '.dotest' appears to be a legacy marker -- confirm
        # whether it is still produced by any supported git version.
        return (
            os.path.exists(self.work_git.GetDotgitPath("rebase-apply"))
            or os.path.exists(self.work_git.GetDotgitPath("rebase-merge"))
            or os.path.exists(os.path.join(self.worktree, ".dotest"))
        )
    def IsDirty(self, consider_untracked=True):
        """Is the working directory modified in some way?

        Args:
            consider_untracked: If True, untracked files also count as
                dirty.
        """
        # Refresh the index so stat-only differences don't report dirty.
        self.work_git.update_index(
            "-q", "--unmerged", "--ignore-missing", "--refresh"
        )
        # Staged changes?
        if self.work_git.DiffZ("diff-index", "-M", "--cached", HEAD):
            return True
        # Unstaged changes?
        if self.work_git.DiffZ("diff-files"):
            return True
        if consider_untracked and self.UntrackedFiles():
            return True
        return False
    # Cached results of _LoadUserIdentity; None means not yet loaded.
    _userident_name = None
    _userident_email = None

    @property
    def UserName(self):
        """Obtain the user's personal name."""
        if self._userident_name is None:
            self._LoadUserIdentity()
        return self._userident_name

    @property
    def UserEmail(self):
        """Obtain the user's email address. This is very likely
        to be their Gerrit login.
        """
        if self._userident_email is None:
            self._LoadUserIdentity()
        return self._userident_email

    def _LoadUserIdentity(self):
        """Parse `git var GIT_COMMITTER_IDENT` into the name/email caches."""
        u = self.bare_git.var("GIT_COMMITTER_IDENT")
        m = re.compile("^(.*) <([^>]*)> ").match(u)
        if m:
            self._userident_name = m.group(1)
            self._userident_email = m.group(2)
        else:
            # Unparseable ident: cache empty strings so we don't retry.
            self._userident_name = ""
            self._userident_email = ""
def GetRemote(self, name=None):
    """Get the configuration for a single remote.

    Defaults to the current project's remote.
    """
    remote_name = self.remote.name if name is None else name
    return self.config.GetRemote(remote_name)
def GetBranch(self, name):
    """Get the configuration for a single branch."""
    branch_config = self.config.GetBranch(name)
    return branch_config
def GetBranches(self):
    """Get all existing local branches.

    Returns:
        dict mapping each short branch name to its branch config,
        annotated with .current, .published and .revision.
    """
    current = self.CurrentBranch
    all_refs = self._allrefs
    heads = {}

    # First pass: collect every local head.
    for ref_name, ref_id in all_refs.items():
        if not ref_name.startswith(R_HEADS):
            continue
        short = ref_name[len(R_HEADS):]
        branch = self.GetBranch(short)
        branch.current = short == current
        branch.published = None
        branch.revision = ref_id
        heads[short] = branch

    # Second pass: annotate heads that also have a published ref.
    for ref_name, ref_id in all_refs.items():
        if not ref_name.startswith(R_PUB):
            continue
        branch = heads.get(ref_name[len(R_PUB):])
        if branch:
            branch.published = ref_id

    return heads
def MatchesGroups(self, manifest_groups):
    """Returns true if the manifest groups specified at init should cause
    this project to be synced.

    Prefixing a manifest group with "-" inverts the meaning of a group.
    All projects are implicitly labelled with "all".

    labels are resolved in order. In the example case of
        project_groups: "all,group1,group2"
        manifest_groups: "-group1,group2"
    the project will be matched.

    The special manifest group "default" will match any project that
    does not have the special project group "notdefault"
    """
    fallback_groups = self.manifest.default_groups or ["default"]
    selected_groups = manifest_groups or fallback_groups
    project_groups = ["all"] + (self.groups or [])
    if "notdefault" not in project_groups:
        project_groups.append("default")

    # Later labels win: walk them all and keep the last match result.
    matched = False
    for group in selected_groups:
        if group.startswith("-") and group[1:] in project_groups:
            matched = False
        elif group in project_groups:
            matched = True

    return matched
def UncommitedFiles(self, get_all=True):
    """Returns a list of strings, uncommitted files in the git tree.

    Args:
        get_all: a boolean, if True - get information about all different
            uncommitted files. If False - return as soon as any kind of
            uncommitted files is detected.
    """
    details = []
    # Refresh the index so stale stat info isn't reported as a change.
    self.work_git.update_index(
        "-q", "--unmerged", "--ignore-missing", "--refresh"
    )
    if self.IsRebaseInProgress():
        details.append("rebase in progress")
        if not get_all:
            return details

    # Probes are lazy so later git invocations are skipped entirely
    # when an earlier probe already triggered the early return.
    probes = (
        lambda: self.work_git.DiffZ("diff-index", "--cached", HEAD).keys(),
        lambda: self.work_git.DiffZ("diff-files").keys(),
        lambda: self.UntrackedFiles(),
    )
    for probe in probes:
        changes = probe()
        if changes:
            details.extend(changes)
            if not get_all:
                return details

    return details
def UntrackedFiles(self):
    """Returns a list of strings, untracked files in the git tree."""
    untracked = self.work_git.LsOthers()
    return untracked
def HasChanges(self):
    """Returns true if there are uncommitted changes."""
    # get_all=False lets the probe stop at the first dirty indicator.
    if self.UncommitedFiles(get_all=False):
        return True
    return False
def PrintWorkTreeStatus(self, output_redir=None, quiet=False, local=False):
    """Prints the status of the repository to stdout.

    Args:
        output_redir: If specified, redirect the output to this object.
        quiet: If True then only print the project name.  Do not print
            the modified files, branch name, etc.
        local: a boolean, if True, the path is relative to the local
            (sub)manifest.  If false, the path is relative to the
            outermost manifest.

    Returns:
        "CLEAN" or "DIRTY"; falls through (None) when the worktree is
        missing entirely.
    """
    if not platform_utils.isdir(self.worktree):
        if output_redir is None:
            output_redir = sys.stdout
        print(file=output_redir)
        print("project %s/" % self.RelPath(local), file=output_redir)
        print(' missing (run "repo sync")', file=output_redir)
        return

    # Refresh the index so stat-only differences are not reported.
    self.work_git.update_index(
        "-q", "--unmerged", "--ignore-missing", "--refresh"
    )
    rb = self.IsRebaseInProgress()
    di = self.work_git.DiffZ("diff-index", "-M", "--cached", HEAD)
    df = self.work_git.DiffZ("diff-files")
    do = self.work_git.LsOthers()
    if not rb and not di and not df and not do and not self.CurrentBranch:
        return "CLEAN"

    out = StatusColoring(self.config)
    if output_redir is not None:
        out.redirect(output_redir)
    out.project("project %-40s", self.RelPath(local) + "/ ")

    if quiet:
        out.nl()
        return "DIRTY"

    branch = self.CurrentBranch
    if branch is None:
        out.nobranch("(*** NO BRANCH ***)")
    else:
        out.branch("branch %s", branch)
    out.nl()

    if rb:
        out.important("prior sync failed; rebase still in progress")
        out.nl()

    # Merge the three change sources (staged, unstaged, untracked) into
    # one sorted listing; each path gets a two-letter i/f status prefix.
    paths = list()
    paths.extend(di.keys())
    paths.extend(df.keys())
    paths.extend(do)

    for p in sorted(set(paths)):
        try:
            i = di[p]
        except KeyError:
            i = None

        try:
            f = df[p]
        except KeyError:
            f = None

        # Index status is uppercase, file (worktree) status lowercase.
        if i:
            i_status = i.status.upper()
        else:
            i_status = "-"

        if f:
            f_status = f.status.lower()
        else:
            f_status = "-"

        if i and i.src_path:
            # Rename/copy entry: show source => destination and score.
            line = " %s%s\t%s => %s (%s%%)" % (
                i_status,
                f_status,
                i.src_path,
                p,
                i.level,
            )
        else:
            line = " %s%s\t%s" % (i_status, f_status, p)

        if i and not f:
            out.added("%s", line)
        elif (i and f) or (not i and f):
            out.changed("%s", line)
        elif not i and not f:
            out.untracked("%s", line)
        else:
            out.write("%s", line)
        out.nl()

    return "DIRTY"
def PrintWorkTreeDiff(
    self, absolute_paths=False, output_redir=None, local=False
):
    """Prints the diff of the working tree against HEAD to stdout.

    Args:
        absolute_paths: If True, prefix diff paths with this project's
            relative path via --src-prefix/--dst-prefix.
        output_redir: If specified, redirect the output to this object.
        local: Passed to RelPath(); selects (sub)manifest-relative vs
            outermost-manifest-relative paths.

    Returns:
        True if `git diff` exited 0, else False.
    """
    out = DiffColoring(self.config)
    if output_redir:
        out.redirect(output_redir)
    cmd = ["diff"]
    if out.is_on:
        cmd.append("--color")
    cmd.append(HEAD)
    if absolute_paths:
        cmd.append("--src-prefix=a/%s/" % self.RelPath(local))
        cmd.append("--dst-prefix=b/%s/" % self.RelPath(local))
    cmd.append("--")
    try:
        p = GitCommand(self, cmd, capture_stdout=True, capture_stderr=True)
        # Wait here so a GitError surfaces before any output is printed.
        p.Wait()
    except GitError as e:
        out.nl()
        out.project("project %s/" % self.RelPath(local))
        out.nl()
        out.fail("%s", str(e))
        out.nl()
        return False
    if p.stdout:
        out.nl()
        out.project("project %s/" % self.RelPath(local))
        out.nl()
        out.write("%s", p.stdout)
    # NOTE(review): Wait() is called a second time here; assumes it is
    # idempotent for an already-finished process — confirm in GitCommand.
    return p.Wait() == 0
def WasPublished(self, branch, all_refs=None):
    """Was the branch published (uploaded) for code review?

    If so, returns the SHA-1 hash of the last published
    state for the branch.
    """
    key = R_PUB + branch
    if all_refs is not None:
        # Caller supplied a pre-loaded ref snapshot; consult it directly.
        return all_refs.get(key)
    try:
        return self.bare_git.rev_parse(key)
    except GitError:
        return None
def CleanPublishedCache(self, all_refs=None):
    """Prunes any stale published refs."""
    if all_refs is None:
        all_refs = self._allrefs
    heads = set()
    published = {}
    for ref_name, ref_id in all_refs.items():
        if ref_name.startswith(R_HEADS):
            heads.add(ref_name)
        elif ref_name.startswith(R_PUB):
            published[ref_name] = ref_id

    # Drop every published ref whose local branch no longer exists.
    for ref_name, ref_id in published.items():
        short = ref_name[len(R_PUB):]
        if R_HEADS + short not in heads:
            self.bare_git.DeleteRef(ref_name, ref_id)
def GetUploadableBranches(self, selected_branch=None):
    """List any branches which can be uploaded for review."""
    heads = {}
    published = {}
    for ref_name, ref_id in self._allrefs.items():
        if ref_name.startswith(R_HEADS):
            heads[ref_name[len(R_HEADS):]] = ref_id
        elif ref_name.startswith(R_PUB):
            published[ref_name[len(R_PUB):]] = ref_id

    ready = []
    for branch, ref_id in heads.items():
        # Skip branches already published at this exact revision.
        if published.get(branch) == ref_id:
            continue
        # Honor an explicit branch filter when given.
        if selected_branch and branch != selected_branch:
            continue

        rb = self.GetUploadableBranch(branch)
        if rb:
            ready.append(rb)
    return ready
def GetUploadableBranch(self, branch_name):
    """Get a single uploadable branch, or None."""
    branch = self.GetBranch(branch_name)
    base = branch.LocalMerge
    if base:
        rb = ReviewableBranch(self, branch, base)
        # Only uploadable when there is something to upload.
        if rb.commits:
            return rb
    return None
def UploadForReview(
    self,
    branch=None,
    people=([], []),
    dryrun=False,
    auto_topic=False,
    hashtags=(),
    labels=(),
    private=False,
    notify=None,
    wip=False,
    ready=False,
    dest_branch=None,
    validate_certs=True,
    push_options=None,
):
    """Uploads the named branch for code review.

    Args:
        branch: Branch name to upload; defaults to the current branch.
        people: Tuple of (reviewer, cc) email address lists.
        dryrun: Pass -n to git push; nothing is actually uploaded.
        auto_topic: Use the branch name as the review topic.
        hashtags: Iterable of hashtags (t= push option).
        labels: Iterable of review labels, e.g. "CodeReview+1".
        private: Mark the change private.
        notify: Notification handling value (notify= push option).
        wip: Mark the change work-in-progress.
        ready: Mark the change ready for review.
        dest_branch: Remote branch to upload for; defaults to the
            project's dest_branch, then the branch's merge target.
        validate_certs: Validate TLS certs when resolving the review URL.
        push_options: Extra `git push -o` options.

    Raises:
        GitError: Not on a branch, branch tracks no remote, or the
            remote has no review url.
        UploadError: Bad label syntax or review not configured.
    """
    if branch is None:
        branch = self.CurrentBranch
    if branch is None:
        raise GitError("not currently on a branch", project=self.name)

    branch = self.GetBranch(branch)
    if not branch.LocalMerge:
        raise GitError(
            "branch %s does not track a remote" % branch.name,
            project=self.name,
        )
    if not branch.remote.review:
        raise GitError(
            "remote %s has no review url" % branch.remote.name,
            project=self.name,
        )

    # Basic validity check on label syntax.
    for label in labels:
        if not re.match(r"^.+[+-][0-9]+$", label):
            raise UploadError(
                f'invalid label syntax "{label}": labels use forms like '
                "CodeReview+1 or Verified-1",
                project=self.name,
            )

    if dest_branch is None:
        dest_branch = self.dest_branch
    if dest_branch is None:
        dest_branch = branch.merge
    if not dest_branch.startswith(R_HEADS):
        dest_branch = R_HEADS + dest_branch

    if not branch.remote.projectname:
        branch.remote.projectname = self.name
        branch.remote.Save()

    url = branch.remote.ReviewUrl(self.UserEmail, validate_certs)
    if url is None:
        raise UploadError("review not configured", project=self.name)
    cmd = ["push", "--progress"]
    if dryrun:
        cmd.append("-n")

    if url.startswith("ssh://"):
        cmd.append("--receive-pack=gerrit receive-pack")

    # This stops git from pushing all reachable annotated tags when
    # push.followTags is configured. Gerrit does not accept any tags
    # pushed to a CL.
    if git_require((1, 8, 3)):
        cmd.append("--no-follow-tags")

    for push_option in push_options or []:
        cmd.append("-o")
        cmd.append(push_option)

    cmd.append(url)

    # Build the Gerrit refspec: refs/heads/<branch>:refs/for/<dest>,
    # with push options appended after a '%' separator.
    if dest_branch.startswith(R_HEADS):
        dest_branch = dest_branch[len(R_HEADS):]
    ref_spec = "%s:refs/for/%s" % (R_HEADS + branch.name, dest_branch)
    opts = []
    if auto_topic:
        opts += ["topic=" + branch.name]
    opts += ["t=%s" % p for p in hashtags]
    # NB: No need to encode labels as they've been validated above.
    opts += ["l=%s" % p for p in labels]
    opts += ["r=%s" % p for p in people[0]]
    opts += ["cc=%s" % p for p in people[1]]
    if notify:
        opts += ["notify=" + notify]
    if private:
        opts += ["private"]
    if wip:
        opts += ["wip"]
    if ready:
        opts += ["ready"]
    if opts:
        ref_spec = ref_spec + "%" + ",".join(opts)
    cmd.append(ref_spec)

    GitCommand(self, cmd, bare=True, verify_command=True).Wait()

    if not dryrun:
        # Record the published state so WasPublished() can find it.
        msg = "posted to %s for %s" % (branch.remote.review, dest_branch)
        self.bare_git.UpdateRef(
            R_PUB + branch.name, R_HEADS + branch.name, message=msg
        )
def _ExtractArchive(self, tarpath, path=None):
"""Extract the given tar on its current location
Args:
tarpath: The path to the actual tar file
"""
try:
with tarfile.open(tarpath, "r") as tar:
tar.extractall(path=path)
return True
except (OSError, tarfile.TarError) as e:
logger.error("error: Cannot extract archive %s: %s", tarpath, e)
return False
def Sync_NetworkHalf(
    self,
    quiet=False,
    verbose=False,
    output_redir=None,
    is_new=None,
    current_branch_only=None,
    force_sync=False,
    clone_bundle=True,
    tags=None,
    archive=False,
    optimized_fetch=False,
    retry_fetches=0,
    prune=False,
    submodules=False,
    ssh_proxy=None,
    clone_filter=None,
    partial_clone_exclude=set(),
    clone_filter_for_depth=None,
):
    """Perform only the network IO portion of the sync process.

    Local working directory/branch state is not affected.

    Returns:
        SyncNetworkHalfResult describing the fetch outcome and any error.
    """
    # NOTE(review): partial_clone_exclude has a mutable default (set());
    # it is only read below, never mutated, so this is safe but fragile.
    if archive and not isinstance(self, MetaProject):
        if self.remote.url.startswith(("http://", "https://")):
            msg_template = (
                "%s: Cannot fetch archives from http/https remotes."
            )
            msg_args = self.name
            msg = msg_template % msg_args
            logger.error(msg_template, msg_args)
            return SyncNetworkHalfResult(
                False, SyncNetworkHalfError(msg, project=self.name)
            )

        # Archive mode: fetch a tarball of the tree instead of objects.
        name = self.relpath.replace("\\", "/")
        name = name.replace("/", "_")
        tarpath = "%s.tar" % name
        topdir = self.manifest.topdir

        try:
            self._FetchArchive(tarpath, cwd=topdir)
        except GitError as e:
            logger.error("error: %s", e)
            return SyncNetworkHalfResult(False, e)

        # From now on, we only need absolute tarpath.
        tarpath = os.path.join(topdir, tarpath)

        if not self._ExtractArchive(tarpath, path=topdir):
            return SyncNetworkHalfResult(
                True,
                SyncNetworkHalfError(
                    f"Unable to Extract Archive {tarpath}",
                    project=self.name,
                ),
            )
        try:
            platform_utils.remove(tarpath)
        except OSError as e:
            logger.warning("warn: Cannot remove archive %s: %s", tarpath, e)
        self._CopyAndLinkFiles()
        return SyncNetworkHalfResult(True)

    # If the shared object dir already exists, don't try to rebootstrap with
    # a clone bundle download. We should have the majority of objects
    # already.
    if clone_bundle and os.path.exists(self.objdir):
        clone_bundle = False

    if self.name in partial_clone_exclude:
        clone_bundle = True
        clone_filter = None

    if is_new is None:
        is_new = not self.Exists
    if is_new:
        self._InitGitDir(force_sync=force_sync, quiet=quiet)
    else:
        self._UpdateHooks(quiet=quiet)
    self._InitRemote()

    if self.UseAlternates:
        # If gitdir/objects is a symlink, migrate it from the old layout.
        gitdir_objects = os.path.join(self.gitdir, "objects")
        if platform_utils.islink(gitdir_objects):
            platform_utils.remove(gitdir_objects, missing_ok=True)
        gitdir_alt = os.path.join(self.gitdir, "objects/info/alternates")
        if not os.path.exists(gitdir_alt):
            os.makedirs(os.path.dirname(gitdir_alt), exist_ok=True)
            _lwrite(
                gitdir_alt,
                os.path.join(
                    os.path.relpath(self.objdir, gitdir_objects), "objects"
                )
                + "\n",
            )

    if is_new:
        # A fresh objdir may point at a bootstrap alternate; read it so
        # the fetch below can take advantage of those objects.
        alt = os.path.join(self.objdir, "objects/info/alternates")
        try:
            with open(alt) as fd:
                # This works for both absolute and relative alternate
                # directories.
                alt_dir = os.path.join(
                    self.objdir, "objects", fd.readline().rstrip()
                )
        except OSError:
            alt_dir = None
    else:
        alt_dir = None

    if (
        clone_bundle
        and alt_dir is None
        and self._ApplyCloneBundle(
            initial=is_new, quiet=quiet, verbose=verbose
        )
    ):
        is_new = False

    if current_branch_only is None:
        if self.sync_c:
            current_branch_only = True
        elif not self.manifest._loaded:
            # Manifest cannot check defaults until it syncs.
            current_branch_only = False
        elif self.manifest.default.sync_c:
            current_branch_only = True

    if tags is None:
        tags = self.sync_tags

    if self.clone_depth:
        depth = self.clone_depth
    else:
        depth = self.manifest.manifestProject.depth

    if depth and clone_filter_for_depth:
        # A filter-based shallow clone replaces an explicit --depth.
        depth = None
        clone_filter = clone_filter_for_depth

    # See if we can skip the network fetch entirely.
    remote_fetched = False
    if not (
        optimized_fetch
        and IsId(self.revisionExpr)
        and self._CheckForImmutableRevision()
    ):
        remote_fetched = True
        try:
            if not self._RemoteFetch(
                initial=is_new,
                quiet=quiet,
                verbose=verbose,
                output_redir=output_redir,
                alt_dir=alt_dir,
                current_branch_only=current_branch_only,
                tags=tags,
                prune=prune,
                depth=depth,
                submodules=submodules,
                force_sync=force_sync,
                ssh_proxy=ssh_proxy,
                clone_filter=clone_filter,
                retry_fetches=retry_fetches,
            ):
                return SyncNetworkHalfResult(
                    remote_fetched,
                    SyncNetworkHalfError(
                        f"Unable to remote fetch project {self.name}",
                        project=self.name,
                    ),
                )
        except RepoError as e:
            return SyncNetworkHalfResult(
                remote_fetched,
                e,
            )

    mp = self.manifest.manifestProject
    dissociate = mp.dissociate
    if dissociate:
        # Repack everything locally so the alternates file can be
        # removed and the repository stands alone.
        alternates_file = os.path.join(
            self.objdir, "objects/info/alternates"
        )
        if os.path.exists(alternates_file):
            cmd = ["repack", "-a", "-d"]
            p = GitCommand(
                self,
                cmd,
                bare=True,
                capture_stdout=bool(output_redir),
                merge_output=bool(output_redir),
            )
            if p.stdout and output_redir:
                output_redir.write(p.stdout)
            if p.Wait() != 0:
                return SyncNetworkHalfResult(
                    remote_fetched,
                    GitError(
                        "Unable to repack alternates", project=self.name
                    ),
                )
            platform_utils.remove(alternates_file)

    if self.worktree:
        self._InitMRef()
    else:
        self._InitMirrorHead()
        platform_utils.remove(
            os.path.join(self.gitdir, "FETCH_HEAD"), missing_ok=True
        )
    return SyncNetworkHalfResult(remote_fetched)
def PostRepoUpgrade(self):
    """Re-install hooks; invoked after the repo launcher is upgraded."""
    self._InitHooks()
def _CopyAndLinkFiles(self):
if self.client.isGitcClient:
return
for copyfile in self.copyfiles:
copyfile._Copy()
for linkfile in self.linkfiles:
linkfile._Link()
def GetCommitRevisionId(self):
    """Get revisionId of a commit.

    Use this method instead of GetRevisionId to get the id of the commit
    rather than the id of the current git object (for example, a tag).

    Raises:
        ManifestInvalidRevisionError: The revision cannot be resolved.
    """
    if self.revisionId:
        # Already pinned to an explicit commit id.
        return self.revisionId
    if not self.revisionExpr.startswith(R_TAGS):
        return self.GetRevisionId(self._allrefs)

    try:
        # Peel the tag to the commit it points at.
        return self.bare_git.rev_list(self.revisionExpr, "-1")[0]
    except GitError:
        raise ManifestInvalidRevisionError(
            "revision %s in %s not found" % (self.revisionExpr, self.name)
        )
def GetRevisionId(self, all_refs=None):
    """Resolve this project's revision expression to an object id.

    Args:
        all_refs: Optional pre-loaded {ref name: id} mapping consulted
            before asking git directly.

    Raises:
        ManifestInvalidRevisionError: The revision cannot be resolved.
    """
    if self.revisionId:
        return self.revisionId

    rem = self.GetRemote()
    rev = rem.ToLocal(self.revisionExpr)

    if all_refs is not None and rev in all_refs:
        return all_refs[rev]

    try:
        # ^0 peels tags so a commit id is always returned.
        return self.bare_git.rev_parse("--verify", "%s^0" % rev)
    except GitError:
        raise ManifestInvalidRevisionError(
            "revision %s in %s not found" % (self.revisionExpr, self.name)
        )
def SetRevisionId(self, revisionId):
    """Pin the project to a concrete revision id.

    Any previously-set symbolic revisionExpr is remembered as upstream.
    """
    if self.revisionExpr:
        self.upstream = self.revisionExpr
    self.revisionId = revisionId
def Sync_LocalHalf(
    self, syncbuf, force_sync=False, submodules=False, errors=None
):
    """Perform only the local IO portion of the sync process.

    Network access is not required.

    Args:
        syncbuf: Sync buffer collecting info/fail messages and deferred
            actions (later1/later2).
        force_sync: Passed through to _InitWorkTree.
        submodules: Also sync registered git submodules.
        errors: Optional list that accumulates any errors reported.
    """
    if errors is None:
        errors = []

    def fail(error: Exception):
        # Record the error and report it through the sync buffer.
        errors.append(error)
        syncbuf.fail(self, error)

    if not os.path.exists(self.gitdir):
        fail(
            LocalSyncFail(
                "Cannot checkout %s due to missing network sync; Run "
                "`repo sync -n %s` first." % (self.name, self.name),
                project=self.name,
            )
        )
        return

    self._InitWorkTree(force_sync=force_sync, submodules=submodules)
    all_refs = self.bare_ref.all
    self.CleanPublishedCache(all_refs)
    revid = self.GetRevisionId(all_refs)

    # Special case the root of the repo client checkout. Make sure it
    # doesn't contain files being checked out to dirs we don't allow.
    if self.relpath == ".":
        PROTECTED_PATHS = {".repo"}
        paths = set(
            self.work_git.ls_tree("-z", "--name-only", "--", revid).split(
                "\0"
            )
        )
        bad_paths = paths & PROTECTED_PATHS
        if bad_paths:
            fail(
                LocalSyncFail(
                    "Refusing to checkout project that writes to protected "
                    "paths: %s" % (", ".join(bad_paths),),
                    project=self.name,
                )
            )
            return

    def _doff():
        # Deferred fast-forward to the manifest revision.
        self._FastForward(revid)
        self._CopyAndLinkFiles()

    def _dosubmodules():
        self._SyncSubmodules(quiet=True)

    head = self.work_git.GetHead()
    if head.startswith(R_HEADS):
        branch = head[len(R_HEADS):]
        try:
            head = all_refs[head]
        except KeyError:
            head = None
    else:
        branch = None

    if branch is None or syncbuf.detach_head:
        # Currently on a detached HEAD. The user is assumed to
        # not have any local modifications worth worrying about.
        if self.IsRebaseInProgress():
            fail(_PriorSyncFailedError(project=self.name))
            return

        if head == revid:
            # No changes; don't do anything further.
            # Except if the head needs to be detached.
            if not syncbuf.detach_head:
                # The copy/linkfile config may have changed.
                self._CopyAndLinkFiles()
                return
        else:
            lost = self._revlist(not_rev(revid), HEAD)
            if lost:
                syncbuf.info(self, "discarding %d commits", len(lost))

        try:
            self._Checkout(revid, quiet=True)
            if submodules:
                self._SyncSubmodules(quiet=True)
        except GitError as e:
            fail(e)
            return
        self._CopyAndLinkFiles()
        return

    if head == revid:
        # No changes; don't do anything further.
        #
        # The copy/linkfile config may have changed.
        self._CopyAndLinkFiles()
        return

    branch = self.GetBranch(branch)

    if not branch.LocalMerge:
        # The current branch has no tracking configuration.
        # Jump off it to a detached HEAD.
        syncbuf.info(
            self, "leaving %s; does not track upstream", branch.name
        )
        try:
            self._Checkout(revid, quiet=True)
            if submodules:
                self._SyncSubmodules(quiet=True)
        except GitError as e:
            fail(e)
            return
        self._CopyAndLinkFiles()
        return

    upstream_gain = self._revlist(not_rev(HEAD), revid)

    # See if we can perform a fast forward merge. This can happen if our
    # branch isn't in the exact same state as we last published.
    try:
        self.work_git.merge_base(
            "--is-ancestor", HEAD, revid, log_as_error=False
        )
        # Skip the published logic.
        pub = False
    except GitError:
        pub = self.WasPublished(branch.name, all_refs)

    if pub:
        not_merged = self._revlist(not_rev(revid), pub)
        if not_merged:
            if upstream_gain:
                # The user has published this branch and some of those
                # commits are not yet merged upstream. We do not want
                # to rewrite the published commits so we punt.
                fail(
                    LocalSyncFail(
                        "branch %s is published (but not merged) and is "
                        "now %d commits behind"
                        % (branch.name, len(upstream_gain)),
                        project=self.name,
                    )
                )
                return
        elif pub == head:
            # All published commits are merged, and thus we are a
            # strict subset. We can fast-forward safely.
            syncbuf.later1(self, _doff)
            if submodules:
                syncbuf.later1(self, _dosubmodules)
            return

    # Examine the local commits not in the remote. Find the
    # last one attributed to this user, if any.
    local_changes = self._revlist(not_rev(revid), HEAD, format="%H %ce")
    last_mine = None
    cnt_mine = 0
    for commit in local_changes:
        commit_id, committer_email = commit.split(" ", 1)
        if committer_email == self.UserEmail:
            last_mine = commit_id
            cnt_mine += 1

    if not upstream_gain and cnt_mine == len(local_changes):
        # The copy/linkfile config may have changed.
        self._CopyAndLinkFiles()
        return

    if self.IsDirty(consider_untracked=False):
        fail(_DirtyError(project=self.name))
        return

    # If the upstream switched on us, warn the user.
    if branch.merge != self.revisionExpr:
        if branch.merge and self.revisionExpr:
            syncbuf.info(
                self,
                "manifest switched %s...%s",
                branch.merge,
                self.revisionExpr,
            )
        elif branch.merge:
            syncbuf.info(self, "manifest no longer tracks %s", branch.merge)

    if cnt_mine < len(local_changes):
        # Upstream rebased. Not everything in HEAD was created by this user.
        syncbuf.info(
            self,
            "discarding %d commits removed from upstream",
            len(local_changes) - cnt_mine,
        )

    branch.remote = self.GetRemote()
    if not IsId(self.revisionExpr):
        # In case of manifest sync the revisionExpr might be a SHA1.
        branch.merge = self.revisionExpr
        if not branch.merge.startswith("refs/"):
            branch.merge = R_HEADS + branch.merge
    branch.Save()

    if cnt_mine > 0 and self.rebase:
        # The user has local commits: rebase them onto the new revision.

        def _docopyandlink():
            self._CopyAndLinkFiles()

        def _dorebase():
            self._Rebase(upstream="%s^1" % last_mine, onto=revid)

        syncbuf.later2(self, _dorebase)
        if submodules:
            syncbuf.later2(self, _dosubmodules)
        syncbuf.later2(self, _docopyandlink)
    elif local_changes:
        # Local commits but rebasing is off: hard reset to upstream.
        try:
            self._ResetHard(revid)
            if submodules:
                self._SyncSubmodules(quiet=True)
            self._CopyAndLinkFiles()
        except GitError as e:
            fail(e)
            return
    else:
        # No local commits: plain fast-forward, deferred.
        syncbuf.later1(self, _doff)
        if submodules:
            syncbuf.later1(self, _dosubmodules)
def AddCopyFile(self, src, dest, topdir):
    """Mark |src| for copying to |dest| (relative to |topdir|).

    No filesystem changes occur here.  Actual copying happens later on
    (see _CopyAndLinkFiles).

    Paths should have basic validation run on them before being queued.
    Further checking will be handled when the actual copy happens.
    """
    self.copyfiles.append(_CopyFile(self.worktree, src, topdir, dest))
def AddLinkFile(self, src, dest, topdir):
    """Mark |dest| to create a symlink (relative to |topdir|) pointing
    to |src|.

    No filesystem changes occur here.  Actual linking happens later on
    (see _CopyAndLinkFiles).

    Paths should have basic validation run on them before being queued.
    Further checking will be handled when the actual link happens.
    """
    self.linkfiles.append(_LinkFile(self.worktree, src, topdir, dest))
def AddAnnotation(self, name, value, keep):
    # Queue a manifest annotation for this project.
    # NOTE(review): `keep` flag semantics defined by Annotation — confirm.
    self.annotations.append(Annotation(name, value, keep))
def DownloadPatchSet(self, change_id, patch_id):
    """Download a single patch set of a single change to FETCH_HEAD."""
    remote = self.GetRemote()

    # Gerrit change refs are sharded by the last two digits of the
    # change number: refs/changes/NN/<change>/<patchset>.
    change_ref = "refs/changes/%2.2d/%d/%d" % (
        change_id % 100,
        change_id,
        patch_id,
    )
    GitCommand(
        self,
        ["fetch", remote.name, change_ref],
        bare=True,
        verify_command=True,
    ).Wait()

    return DownloadedChange(
        self,
        self.GetRevisionId(),
        change_id,
        patch_id,
        self.bare_git.rev_parse("FETCH_HEAD"),
    )
def DeleteWorktree(self, quiet=False, force=False):
    """Delete the source checkout and any other housekeeping tasks.

    This currently leaves behind the internal .repo/ cache state.  This
    helps when switching branches or manifest changes get reverted as we
    don't have to redownload all the git objects.  But we should do some
    GC at some point.

    Args:
        quiet: Whether to hide normal messages.
        force: Always delete tree even if dirty.

    Returns:
        True if the worktree was completely cleaned out.

    Raises:
        DeleteDirtyWorktreeError: Worktree is dirty and force is False.
        DeleteWorktreeError: Files or directories could not be removed.
    """
    if self.IsDirty():
        if force:
            logger.warning(
                "warning: %s: Removing dirty project: uncommitted changes "
                "lost.",
                self.RelPath(local=False),
            )
        else:
            # Bug fix: the two adjacent literals previously concatenated
            # without a space, producing "uncommittedchanges are present".
            msg = (
                "error: %s: Cannot remove project: uncommitted "
                "changes are present.\n" % self.RelPath(local=False)
            )
            logger.error(msg)
            raise DeleteDirtyWorktreeError(msg, project=self)

    if not quiet:
        print(
            "%s: Deleting obsolete checkout." % (self.RelPath(local=False),)
        )

    # Unlock and delink from the main worktree.  We don't use git's worktree
    # remove because it will recursively delete projects -- we handle that
    # ourselves below.  https://crbug.com/git/48
    if self.use_git_worktrees:
        needle = platform_utils.realpath(self.gitdir)
        # Find the git worktree commondir under .repo/worktrees/.
        output = self.bare_git.worktree("list", "--porcelain").splitlines()[
            0
        ]
        assert output.startswith("worktree "), output
        commondir = output[9:]
        # Walk each of the git worktrees to see where they point.
        configs = os.path.join(commondir, "worktrees")
        for name in os.listdir(configs):
            gitdir = os.path.join(configs, name, "gitdir")
            with open(gitdir) as fp:
                relpath = fp.read().strip()
            # Resolve the checkout path and see if it matches this project.
            fullpath = platform_utils.realpath(
                os.path.join(configs, name, relpath)
            )
            if fullpath == needle:
                platform_utils.rmtree(os.path.join(configs, name))

    # Delete the .git directory first, so we're less likely to have a
    # partially working git repository around.  There shouldn't be any git
    # projects here, so rmtree works.

    # Try to remove plain files first in case of git worktrees.  If this
    # fails for any reason, we'll fall back to rmtree, and that'll display
    # errors if it can't remove things either.
    try:
        platform_utils.remove(self.gitdir)
    except OSError:
        pass
    try:
        platform_utils.rmtree(self.gitdir)
    except OSError as e:
        if e.errno != errno.ENOENT:
            logger.error("error: %s: %s", self.gitdir, e)
            logger.error(
                "error: %s: Failed to delete obsolete checkout; remove "
                "manually, then run `repo sync -l`.",
                self.RelPath(local=False),
            )
            raise DeleteWorktreeError(aggregate_errors=[e])

    # Delete everything under the worktree, except for directories that
    # contain another git project.
    dirs_to_remove = []
    failed = False
    errors = []
    for root, dirs, files in platform_utils.walk(self.worktree):
        for f in files:
            path = os.path.join(root, f)
            try:
                platform_utils.remove(path)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    logger.error("error: %s: Failed to remove: %s", path, e)
                    failed = True
                    errors.append(e)
        # Prune nested git projects so we never descend into them.
        dirs[:] = [
            d
            for d in dirs
            if not os.path.lexists(os.path.join(root, d, ".git"))
        ]
        dirs_to_remove += [
            os.path.join(root, d)
            for d in dirs
            if os.path.join(root, d) not in dirs_to_remove
        ]
    # Remove the deepest directories first so parents empty out.
    for d in reversed(dirs_to_remove):
        if platform_utils.islink(d):
            try:
                platform_utils.remove(d)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    logger.error("error: %s: Failed to remove: %s", d, e)
                    failed = True
                    errors.append(e)
        elif not platform_utils.listdir(d):
            try:
                platform_utils.rmdir(d)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    logger.error("error: %s: Failed to remove: %s", d, e)
                    failed = True
                    errors.append(e)
    if failed:
        logger.error(
            "error: %s: Failed to delete obsolete checkout.",
            self.RelPath(local=False),
        )
        logger.error(
            " Remove manually, then run `repo sync -l`.",
        )
        raise DeleteWorktreeError(aggregate_errors=errors)

    # Try deleting parent dirs if they are empty.
    path = self.worktree
    while path != self.manifest.topdir:
        try:
            platform_utils.rmdir(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                break
        path = os.path.dirname(path)

    return True
def StartBranch(self, name, branch_merge="", revision=None):
    """Create a new branch off the manifest's revision.

    Args:
        name: Short branch name (without refs/heads/).
        branch_merge: Merge target to record; defaults to revisionExpr.
        revision: Optional revision to branch from; defaults to the
            manifest revision.

    Returns:
        True on success (including when already on the branch).
    """
    if not branch_merge:
        branch_merge = self.revisionExpr
    head = self.work_git.GetHead()
    if head == (R_HEADS + name):
        # Already on this branch; nothing to do.
        return True
    all_refs = self.bare_ref.all
    if R_HEADS + name in all_refs:
        # Branch already exists; just check it out.
        GitCommand(
            self, ["checkout", "-q", name, "--"], verify_command=True
        ).Wait()
        return True

    branch = self.GetBranch(name)
    branch.remote = self.GetRemote()
    branch.merge = branch_merge
    if not branch.merge.startswith("refs/") and not IsId(branch_merge):
        branch.merge = R_HEADS + branch_merge

    if revision is None:
        revid = self.GetRevisionId(all_refs)
    else:
        revid = self.work_git.rev_parse(revision)

    if head.startswith(R_HEADS):
        try:
            head = all_refs[head]
        except KeyError:
            head = None
    if revid and head and revid == head:
        # New branch starts at the current HEAD commit: create the ref
        # and repoint HEAD directly, avoiding a full checkout.
        ref = R_HEADS + name
        self.work_git.update_ref(ref, revid)
        self.work_git.symbolic_ref(HEAD, ref)
        branch.Save()
        return True

    GitCommand(
        self,
        ["checkout", "-q", "-b", branch.name, revid],
        verify_command=True,
    ).Wait()
    branch.Save()
    return True
def CheckoutBranch(self, name):
    """Checkout a local topic branch.

    Args:
        name: The name of the branch to checkout.

    Returns:
        True if the checkout succeeded; False if the
        branch doesn't exist.
    """
    rev = R_HEADS + name
    head = self.work_git.GetHead()
    if head == rev:
        # Already on the branch.
        return True

    all_refs = self.bare_ref.all
    try:
        revid = all_refs[rev]
    except KeyError:
        # Branch does not exist in this project.
        return False

    if head.startswith(R_HEADS):
        try:
            head = all_refs[head]
        except KeyError:
            head = None

    if head == revid:
        # Same revision; just update HEAD to point to the new
        # target branch, but otherwise take no other action.
        _lwrite(
            self.work_git.GetDotgitPath(subpath=HEAD),
            "ref: %s%s\n" % (R_HEADS, name),
        )
        return True

    # Different revision: let git perform a real checkout.
    GitCommand(
        self,
        ["checkout", name, "--"],
        capture_stdout=True,
        capture_stderr=True,
        verify_command=True,
    ).Wait()
    return True
def AbandonBranch(self, name):
    """Destroy a local topic branch.

    Args:
        name: The name of the branch to abandon.

    Returns:
        True if the abandon succeeded; Raises GitCommandError if it didn't;
        None if the branch didn't exist.
    """
    rev = R_HEADS + name
    all_refs = self.bare_ref.all
    if rev not in all_refs:
        # Doesn't exist
        return None

    head = self.work_git.GetHead()
    if head == rev:
        # We can't destroy the branch while we are sitting
        # on it. Switch to a detached HEAD.
        head = all_refs[head]

        revid = self.GetRevisionId(all_refs)
        if head == revid:
            # Branch tip == manifest revision: detaching only needs a
            # HEAD rewrite, no working tree change.
            _lwrite(
                self.work_git.GetDotgitPath(subpath=HEAD), "%s\n" % revid
            )
        else:
            self._Checkout(revid, quiet=True)

    GitCommand(
        self,
        ["branch", "-D", name],
        capture_stdout=True,
        capture_stderr=True,
        verify_command=True,
    ).Wait()
    return True
def PruneHeads(self):
    """Prune any topic branches already merged into upstream.

    Returns:
        List of ReviewableBranch for branches that could NOT be pruned
        (they still carry commits beyond their merge base).
    """
    cb = self.CurrentBranch
    kill = []
    left = self._allrefs
    for name in left.keys():
        if name.startswith(R_HEADS):
            name = name[len(R_HEADS):]
            if cb is None or name != cb:
                kill.append(name)

    # Minor optimization: If there's nothing to prune, then don't try to
    # read any project state.
    if not kill and not cb:
        return []

    rev = self.GetRevisionId(left)
    if (
        cb is not None
        and not self._revlist(HEAD + "..." + rev)
        and not self.IsDirty(consider_untracked=False)
    ):
        # Current branch is fully merged and the tree is clean:
        # detach so it can be deleted along with the rest.
        self.work_git.DetachHead(HEAD)
        kill.append(cb)

    if kill:
        old = self.bare_git.GetHead()

        try:
            # Detach so `branch -d` may delete even the current branch.
            self.bare_git.DetachHead(rev)

            b = ["branch", "-d"]
            b.extend(kill)
            b = GitCommand(
                self, b, bare=True, capture_stdout=True, capture_stderr=True
            )
            b.Wait()
        finally:
            if IsId(old):
                self.bare_git.DetachHead(old)
            else:
                self.bare_git.SetHead(old)
            # Re-read refs to see which branches actually went away.
            left = self._allrefs

        for branch in kill:
            if (R_HEADS + branch) not in left:
                self.CleanPublishedCache()
                break

    if cb and cb not in kill:
        kill.append(cb)
    kill.sort()

    kept = []
    for branch in kill:
        if R_HEADS + branch in left:
            # `branch -d` refused: branch still has unmerged commits.
            branch = self.GetBranch(branch)
            base = branch.LocalMerge
            if not base:
                base = rev
            kept.append(ReviewableBranch(self, branch, base))
    return kept
def GetRegisteredSubprojects(self):
    """Return all registered subprojects, recursively flattened.

    Sibling groups are appended before descending into each sibling's
    own subprojects (preserving the historical ordering).
    """
    result = []

    def walk(subprojects):
        if not subprojects:
            return
        result.extend(subprojects)
        for sub in subprojects:
            walk(sub.subprojects)

    walk(self.subprojects)
    return result
def _GetSubmodules(self):
    """Return [(rev, path, url), ...] for submodules of this project.

    Reads .gitmodules and `git ls-tree` directly from the object store,
    so it works even before a work tree exists.
    """
    # Unfortunately we cannot call `git submodule status --recursive` here
    # because the working tree might not exist yet, and it cannot be used
    # without a working tree in its current implementation.

    def get_submodules(gitdir, rev):
        """Join .gitmodules entries with their ls-tree revisions."""
        # Parse .gitmodules for submodule sub_paths and sub_urls.
        sub_paths, sub_urls = parse_gitmodules(gitdir, rev)
        if not sub_paths:
            return []
        # Run `git ls-tree` to read SHAs of submodule object, which happen
        # to be revision of submodule repository.
        sub_revs = git_ls_tree(gitdir, rev, sub_paths)
        submodules = []
        for sub_path, sub_url in zip(sub_paths, sub_urls):
            try:
                sub_rev = sub_revs[sub_path]
            except KeyError:
                # Ignore non-exist submodules.
                continue
            submodules.append((sub_rev, sub_path, sub_url))
        return submodules

    re_path = re.compile(r"^submodule\.(.+)\.path=(.*)$")
    re_url = re.compile(r"^submodule\.(.+)\.url=(.*)$")

    def parse_gitmodules(gitdir, rev):
        """Return parallel lists ([paths], [urls]) from .gitmodules at |rev|.

        Any failure (no .gitmodules, git errors) yields ([], []).
        """
        cmd = ["cat-file", "blob", "%s:.gitmodules" % rev]
        try:
            p = GitCommand(
                None,
                cmd,
                capture_stdout=True,
                capture_stderr=True,
                bare=True,
                gitdir=gitdir,
            )
        except GitError:
            return [], []
        if p.Wait() != 0:
            return [], []

        gitmodules_lines = []
        # `git config --file` needs a real file, so spill the blob to a
        # temp file and let git parse it into key=value lines.
        fd, temp_gitmodules_path = tempfile.mkstemp()
        try:
            os.write(fd, p.stdout.encode("utf-8"))
            os.close(fd)
            cmd = ["config", "--file", temp_gitmodules_path, "--list"]
            p = GitCommand(
                None,
                cmd,
                capture_stdout=True,
                capture_stderr=True,
                bare=True,
                gitdir=gitdir,
            )
            if p.Wait() != 0:
                return [], []
            gitmodules_lines = p.stdout.split("\n")
        except GitError:
            return [], []
        finally:
            platform_utils.remove(temp_gitmodules_path)

        names = set()
        paths = {}
        urls = {}
        for line in gitmodules_lines:
            if not line:
                continue
            m = re_path.match(line)
            if m:
                names.add(m.group(1))
                paths[m.group(1)] = m.group(2)
                continue
            m = re_url.match(line)
            if m:
                names.add(m.group(1))
                urls[m.group(1)] = m.group(2)
                continue
        # Sort for a deterministic pairing of paths with urls.
        names = sorted(names)
        return (
            [paths.get(name, "") for name in names],
            [urls.get(name, "") for name in names],
        )

    def git_ls_tree(gitdir, rev, paths):
        """Map each submodule path to the commit id recorded at |rev|."""
        cmd = ["ls-tree", rev, "--"]
        cmd.extend(paths)
        try:
            p = GitCommand(
                None,
                cmd,
                capture_stdout=True,
                capture_stderr=True,
                bare=True,
                gitdir=gitdir,
            )
        except GitError:
            return []
        if p.Wait() != 0:
            return []
        objects = {}
        for line in p.stdout.split("\n"):
            if not line.strip():
                continue
            # ls-tree line: "<mode> <type> <sha>\t<path>" -> take sha, path.
            object_rev, object_path = line.split()[2:4]
            objects[object_path] = object_rev
        return objects

    try:
        rev = self.GetRevisionId()
    except GitError:
        return []
    return get_submodules(self.gitdir, rev)
def GetDerivedSubprojects(self):
    """Return Project objects derived from this project's git submodules.

    Submodules already tracked by the manifest are recursed into rather
    than duplicated; all others are materialized as derived Projects.
    """
    result = []
    if not self.Exists:
        # If git repo does not exist yet, querying its submodules will
        # mess up its states; so return here.
        return result
    for rev, path, url in self._GetSubmodules():
        name = self.manifest.GetSubprojectName(self, path)
        (
            relpath,
            worktree,
            gitdir,
            objdir,
        ) = self.manifest.GetSubprojectPaths(self, name, path)
        # If the manifest already has a project at this path, defer to it.
        project = self.manifest.paths.get(relpath)
        if project:
            result.extend(project.GetDerivedSubprojects())
            continue

        # Relative submodule URLs are resolved against this project's
        # remote URL.
        if url.startswith(".."):
            url = urllib.parse.urljoin("%s/" % self.remote.url, url)
        remote = RemoteSpec(
            self.remote.name,
            url=url,
            pushUrl=self.remote.pushUrl,
            review=self.remote.review,
            revision=self.remote.revision,
        )
        # Pin the derived project to the submodule's recorded commit and
        # inherit sync behavior from this (parent) project.
        subproject = Project(
            manifest=self.manifest,
            name=name,
            remote=remote,
            gitdir=gitdir,
            objdir=objdir,
            worktree=worktree,
            relpath=relpath,
            revisionExpr=rev,
            revisionId=rev,
            rebase=self.rebase,
            groups=self.groups,
            sync_c=self.sync_c,
            sync_s=self.sync_s,
            sync_tags=self.sync_tags,
            parent=self,
            is_derived=True,
        )
        result.append(subproject)
        result.extend(subproject.GetDerivedSubprojects())
    return result
def EnableRepositoryExtension(self, key, value="true", version=1):
    """Enable git repository extension |key| with |value|.

    Args:
        key: The extension to enabled. Omit the "extensions." prefix.
        value: The value to use for the extension.
        version: The minimum git repository version needed.
    """
    # Bump core.repositoryFormatVersion first if the repo is too old for
    # this extension; never downgrade an already-newer repo.
    current_version = self.config.GetInt("core.repositoryFormatVersion")
    if current_version is None:
        current_version = 0
    if current_version < version:
        self.config.SetString(
            "core.repositoryFormatVersion", str(version)
        )

    # Enable the extension!
    self.config.SetString("extensions.%s" % (key,), value)
def ResolveRemoteHead(self, name=None):
    """Find out what the default branch (HEAD) points to.

    Normally this points to refs/heads/master, but projects are moving to
    main. Support whatever the server uses rather than hardcoding "master"
    ourselves.
    """
    remote_name = self.remote.name if name is None else name

    # The output will look like (NB: tabs are separators):
    # ref: refs/heads/master HEAD
    # 5f6803b100bb3cd0f534e96e88c91373e8ed1c44 HEAD
    output = self.bare_git.ls_remote(
        "-q", "--symref", "--exit-code", remote_name, "HEAD"
    )

    for line in output.splitlines():
        value, ref_name = line.split("\t", 1)
        if ref_name != "HEAD" or not value.startswith("ref:"):
            continue
        return value[4:].strip()
    return None
def _CheckForImmutableRevision(self):
    """Return True if the pinned revision already exists locally.

    Checks that revisionExpr resolves to a commit, and, if an upstream
    is set, that the upstream ref exists and contains that commit.
    Any failure means a fetch is required.
    """
    try:
        # if revision (sha or tag) is not present then following function
        # throws an error.
        self.bare_git.rev_list(
            "-1",
            "--missing=allow-any",
            "%s^0" % self.revisionExpr,
            "--",
            log_as_error=False,
        )
        if self.upstream:
            rev = self.GetRemote().ToLocal(self.upstream)
            self.bare_git.rev_list(
                "-1",
                "--missing=allow-any",
                "%s^0" % rev,
                "--",
                log_as_error=False,
            )
            # Also require revisionExpr to be an ancestor of upstream,
            # otherwise the local upstream ref is stale.
            self.bare_git.merge_base(
                "--is-ancestor",
                self.revisionExpr,
                rev,
                log_as_error=False,
            )
        return True
    except GitError:
        # There is no such persistent revision. We have to fetch it.
        return False
def _FetchArchive(self, tarpath, cwd=None):
    """Download revisionExpr as a tarball via `git archive --remote`.

    Args:
        tarpath: Output path for the archive.
        cwd: Optional working directory for the git invocation.
    """
    archive_cmd = [
        "archive",
        "-v",
        "-o",
        tarpath,
        "--remote=%s" % self.remote.url,
        "--prefix=%s/" % self.RelPath(local=False),
        self.revisionExpr,
    ]
    # verify_command makes Wait() raise on a non-zero exit status.
    GitCommand(
        self,
        archive_cmd,
        cwd=cwd,
        capture_stdout=True,
        capture_stderr=True,
        verify_command=True,
    ).Wait()
def _RemoteFetch(
    self,
    name=None,
    current_branch_only=False,
    initial=False,
    quiet=False,
    verbose=False,
    output_redir=None,
    alt_dir=None,
    tags=True,
    prune=False,
    depth=None,
    submodules=False,
    ssh_proxy=None,
    force_sync=False,
    clone_filter=None,
    retry_fetches=2,
    retry_sleep_initial_sec=4.0,
    retry_exp_factor=2.0,
) -> bool:
    """Fetch this project from its remote, with retries.

    Builds a `git fetch` command tailored to the sync mode (mirror,
    shallow, single-branch, partial clone, ...), runs it with exponential
    backoff on failure, and returns True on success.
    """
    tag_name = None
    # The depth should not be used when fetching to a mirror because
    # it will result in a shallow repository that cannot be cloned or
    # fetched from.
    # The repo project should also never be synced with partial depth.
    if self.manifest.IsMirror or self.relpath == ".repo/repo":
        depth = None

    if depth:
        current_branch_only = True

    is_sha1 = bool(IsId(self.revisionExpr))

    if current_branch_only:
        if self.revisionExpr.startswith(R_TAGS):
            # This is a tag and its commit id should never change.
            tag_name = self.revisionExpr[len(R_TAGS) :]
        elif self.upstream and self.upstream.startswith(R_TAGS):
            # This is a tag and its commit id should never change.
            tag_name = self.upstream[len(R_TAGS) :]

        # Immutable revisions (sha1 or tag) already present locally need
        # no network traffic at all.
        if is_sha1 or tag_name is not None:
            if self._CheckForImmutableRevision():
                if verbose:
                    print(
                        "Skipped fetching project %s (already have "
                        "persistent ref)" % self.name
                    )
                return True
        if is_sha1 and not depth:
            # When syncing a specific commit and --depth is not set:
            # * if upstream is explicitly specified and is not a sha1, fetch
            #   only upstream as users expect only upstream to be fetch.
            #   Note: The commit might not be in upstream in which case the
            #   sync will fail.
            # * otherwise, fetch all branches to make sure we end up with
            #   the specific commit.
            if self.upstream:
                current_branch_only = not IsId(self.upstream)
            else:
                current_branch_only = False

    if not name:
        name = self.remote.name

    remote = self.GetRemote(name)
    if not remote.PreConnectFetch(ssh_proxy):
        ssh_proxy = None

    if initial:
        # On the first fetch, seed packed-refs with refs from a local
        # alternate (reference repo) so git can avoid re-downloading
        # objects we already have nearby.
        if alt_dir and "objects" == os.path.basename(alt_dir):
            ref_dir = os.path.dirname(alt_dir)
            packed_refs = os.path.join(self.gitdir, "packed-refs")

            all_refs = self.bare_ref.all
            ids = set(all_refs.values())
            tmp = set()

            for r, ref_id in GitRefs(ref_dir).all.items():
                if r not in all_refs:
                    if r.startswith(R_TAGS) or remote.WritesTo(r):
                        all_refs[r] = ref_id
                        ids.add(ref_id)
                        continue

                if ref_id in ids:
                    continue

                # Unknown ref pointing at a new id: expose the id under a
                # temporary refs/_alt/ name so the fetch can reuse it.
                r = "refs/_alt/%s" % ref_id
                all_refs[r] = ref_id
                ids.add(ref_id)
                tmp.add(r)

            tmp_packed_lines = []
            old_packed_lines = []

            for r in sorted(all_refs):
                line = "%s %s\n" % (all_refs[r], r)
                tmp_packed_lines.append(line)
                if r not in tmp:
                    old_packed_lines.append(line)

            tmp_packed = "".join(tmp_packed_lines)
            old_packed = "".join(old_packed_lines)
            _lwrite(packed_refs, tmp_packed)
        else:
            alt_dir = None

    cmd = ["fetch"]

    if clone_filter:
        git_require((2, 19, 0), fail=True, msg="partial clones")
        cmd.append("--filter=%s" % clone_filter)
        self.EnableRepositoryExtension("partialclone", self.remote.name)

    if depth:
        cmd.append("--depth=%s" % depth)
    else:
        # If this repo has shallow objects, then we don't know which refs
        # have shallow objects or not. Tell git to unshallow all fetched
        # refs. Don't do this with projects that don't have shallow
        # objects, since it is less efficient.
        if os.path.exists(os.path.join(self.gitdir, "shallow")):
            cmd.append("--depth=2147483647")

    if not verbose:
        cmd.append("--quiet")
    if not quiet and sys.stdout.isatty():
        cmd.append("--progress")
    if not self.worktree:
        cmd.append("--update-head-ok")
    cmd.append(name)

    if force_sync:
        cmd.append("--force")

    if prune:
        cmd.append("--prune")

    # Always pass something for --recurse-submodules, git with GIT_DIR
    # behaves incorrectly when not given `--recurse-submodules=no`.
    # (b/218891912)
    cmd.append(
        f'--recurse-submodules={"on-demand" if submodules else "no"}'
    )

    spec = []
    if not current_branch_only:
        # Fetch whole repo.
        spec.append(
            str(("+refs/heads/*:") + remote.ToLocal("refs/heads/*"))
        )
    elif tag_name is not None:
        spec.append("tag")
        spec.append(tag_name)

    if self.manifest.IsMirror and not current_branch_only:
        branch = None
    else:
        branch = self.revisionExpr
    if (
        not self.manifest.IsMirror
        and is_sha1
        and depth
        and git_require((1, 8, 3))
    ):
        # Shallow checkout of a specific commit, fetch from that commit and
        # not the heads only as the commit might be deeper in the history.
        spec.append(branch)
        if self.upstream:
            spec.append(self.upstream)
    else:
        if is_sha1:
            branch = self.upstream
        if branch is not None and branch.strip():
            if not branch.startswith("refs/"):
                branch = R_HEADS + branch
            spec.append(str(("+%s:" % branch) + remote.ToLocal(branch)))

    # If mirroring repo and we cannot deduce the tag or branch to fetch,
    # fetch whole repo.
    if self.manifest.IsMirror and not spec:
        spec.append(
            str(("+refs/heads/*:") + remote.ToLocal("refs/heads/*"))
        )

    # If using depth then we should not get all the tags since they may
    # be outside of the depth.
    if not tags or depth:
        cmd.append("--no-tags")
    else:
        cmd.append("--tags")
        spec.append(str(("+refs/tags/*:") + remote.ToLocal("refs/tags/*")))

    cmd.extend(spec)

    # At least one retry minimum due to git remote prune.
    retry_fetches = max(retry_fetches, 2)
    retry_cur_sleep = retry_sleep_initial_sec
    ok = prune_tried = False
    for try_n in range(retry_fetches):
        # Only the last attempt propagates failure via verify_command.
        verify_command = try_n == retry_fetches - 1
        gitcmd = GitCommand(
            self,
            cmd,
            bare=True,
            objdir=os.path.join(self.objdir, "objects"),
            ssh_proxy=ssh_proxy,
            merge_output=True,
            capture_stdout=quiet or bool(output_redir),
            verify_command=verify_command,
        )
        if gitcmd.stdout and not quiet and output_redir:
            output_redir.write(gitcmd.stdout)
        ret = gitcmd.Wait()
        if ret == 0:
            ok = True
            break

        # Retry later due to HTTP 429 Too Many Requests.
        elif (
            gitcmd.stdout
            and "error:" in gitcmd.stdout
            and "HTTP 429" in gitcmd.stdout
        ):
            # Fallthru to sleep+retry logic at the bottom.
            pass

        # Try to prune remote branches once in case there are conflicts.
        # For example, if the remote had refs/heads/upstream, but deleted
        # that and now has refs/heads/upstream/foo.
        elif (
            gitcmd.stdout
            and "error:" in gitcmd.stdout
            and "git remote prune" in gitcmd.stdout
            and not prune_tried
        ):
            prune_tried = True
            prunecmd = GitCommand(
                self,
                ["remote", "prune", name],
                bare=True,
                ssh_proxy=ssh_proxy,
            )
            ret = prunecmd.Wait()
            if ret:
                break
            print(
                "retrying fetch after pruning remote branches",
                file=output_redir,
            )
            # Continue right away so we don't sleep as we shouldn't need to.
            continue
        elif current_branch_only and is_sha1 and ret == 128:
            # Exit code 128 means "couldn't find the ref you asked for"; if
            # we're in sha1 mode, we just tried sync'ing from the upstream
            # field; it doesn't exist, thus abort the optimization attempt
            # and do a full sync.
            break
        elif ret < 0:
            # Git died with a signal, exit immediately.
            break

        # Figure out how long to sleep before the next attempt, if there is
        # one.
        if not verbose and gitcmd.stdout:
            print(
                "\n%s:\n%s" % (self.name, gitcmd.stdout),
                end="",
                file=output_redir,
            )
        if try_n < retry_fetches - 1:
            print(
                "%s: sleeping %s seconds before retrying"
                % (self.name, retry_cur_sleep),
                file=output_redir,
            )
            time.sleep(retry_cur_sleep)
            # Exponential backoff, capped, with random jitter applied.
            retry_cur_sleep = min(
                retry_exp_factor * retry_cur_sleep, MAXIMUM_RETRY_SLEEP_SEC
            )
            retry_cur_sleep *= 1 - random.uniform(
                -RETRY_JITTER_PERCENT, RETRY_JITTER_PERCENT
            )

    if initial:
        # Undo the temporary packed-refs seeding done above.
        if alt_dir:
            if old_packed != "":
                _lwrite(packed_refs, old_packed)
            else:
                platform_utils.remove(packed_refs)
        self.bare_git.pack_refs("--all", "--prune")

    if is_sha1 and current_branch_only:
        # We just synced the upstream given branch; verify we
        # got what we wanted, else trigger a second run of all
        # refs.
        if not self._CheckForImmutableRevision():
            # Sync the current branch only with depth set to None.
            # We always pass depth=None down to avoid infinite recursion.
            return self._RemoteFetch(
                name=name,
                quiet=quiet,
                verbose=verbose,
                output_redir=output_redir,
                current_branch_only=current_branch_only and depth,
                initial=False,
                alt_dir=alt_dir,
                tags=tags,
                depth=None,
                ssh_proxy=ssh_proxy,
                clone_filter=clone_filter,
            )

    return ok
def _ApplyCloneBundle(self, initial=False, quiet=False, verbose=False):
    """Bootstrap the object store from a $URL/clone.bundle, if available.

    Returns:
        True if a bundle was fetched and applied successfully.
    """
    # Bundles are full-history; skip them for shallow syncs.
    if initial and (
        self.manifest.manifestProject.depth or self.clone_depth
    ):
        return False

    remote = self.GetRemote()
    bundle_url = remote.url + "/clone.bundle"
    bundle_url = GitConfig.ForUser().UrlInsteadOf(bundle_url)
    # Only HTTP-family transports can serve a clone.bundle file.
    if GetSchemeFromUrl(bundle_url) not in (
        "http",
        "https",
        "persistent-http",
        "persistent-https",
    ):
        return False

    bundle_dst = os.path.join(self.gitdir, "clone.bundle")
    bundle_tmp = os.path.join(self.gitdir, "clone.bundle.tmp")

    exist_dst = os.path.exists(bundle_dst)
    exist_tmp = os.path.exists(bundle_tmp)

    # After the initial sync, only bother if a previous (partial)
    # download is lying around to be resumed.
    if not initial and not exist_dst and not exist_tmp:
        return False

    if not exist_dst:
        exist_dst = self._FetchBundle(
            bundle_url, bundle_tmp, bundle_dst, quiet, verbose
        )
    if not exist_dst:
        return False

    # Fetch from the local bundle file just like from a remote.
    cmd = ["fetch"]
    if not verbose:
        cmd.append("--quiet")
    if not quiet and sys.stdout.isatty():
        cmd.append("--progress")
    if not self.worktree:
        cmd.append("--update-head-ok")
    cmd.append(bundle_dst)
    for f in remote.fetch:
        cmd.append(str(f))
    cmd.append("+refs/tags/*:refs/tags/*")

    ok = (
        GitCommand(
            self,
            cmd,
            bare=True,
            objdir=os.path.join(self.objdir, "objects"),
        ).Wait()
        == 0
    )
    # The bundle is single-use; clean up regardless of the outcome.
    platform_utils.remove(bundle_dst, missing_ok=True)
    platform_utils.remove(bundle_tmp, missing_ok=True)
    return ok
def _FetchBundle(self, srcUrl, tmpPath, dstPath, quiet, verbose):
    """Download |srcUrl| with curl to |tmpPath|, rename to |dstPath|.

    Supports resuming a partial download and validates the result is a
    git bundle before renaming. Returns True if |dstPath| now exists.
    """
    platform_utils.remove(dstPath, missing_ok=True)

    cmd = ["curl", "--fail", "--output", tmpPath, "--netrc", "--location"]
    if quiet:
        cmd += ["--silent", "--show-error"]
    if os.path.exists(tmpPath):
        size = os.stat(tmpPath).st_size
        if size >= 1024:
            # Resume a previous partial download.
            cmd += ["--continue-at", "%d" % (size,)]
        else:
            # Anything less than 1k is probably a server error page;
            # start over.
            platform_utils.remove(tmpPath)
    with GetUrlCookieFile(srcUrl, quiet) as (cookiefile, proxy):
        if cookiefile:
            cmd += ["--cookie", cookiefile]
        if proxy:
            cmd += ["--proxy", proxy]
        elif "http_proxy" in os.environ and "darwin" == sys.platform:
            cmd += ["--proxy", os.environ["http_proxy"]]
        # persistent-http(s) is a git transport alias; curl only speaks
        # plain http(s).
        if srcUrl.startswith("persistent-https"):
            srcUrl = "http" + srcUrl[len("persistent-https") :]
        elif srcUrl.startswith("persistent-http"):
            srcUrl = "http" + srcUrl[len("persistent-http") :]
        cmd += [srcUrl]

        proc = None
        with Trace("Fetching bundle: %s", " ".join(cmd)):
            if verbose:
                print("%s: Downloading bundle: %s" % (self.name, srcUrl))
            # In verbose mode let curl write straight to the terminal;
            # otherwise capture everything for error reporting.
            stdout = None if verbose else subprocess.PIPE
            stderr = None if verbose else subprocess.STDOUT
            try:
                proc = subprocess.Popen(cmd, stdout=stdout, stderr=stderr)
            except OSError:
                # curl is not installed / not runnable.
                return False

        (output, _) = proc.communicate()
        curlret = proc.returncode

        if curlret == 22:
            # From curl man page:
            # 22: HTTP page not retrieved. The requested url was not found
            # or returned another error with the HTTP error code being 400
            # or above. This return code only appears if -f, --fail is used.
            if verbose:
                print(
                    "%s: Unable to retrieve clone.bundle; ignoring."
                    % self.name
                )
                if output:
                    print("Curl output:\n%s" % output)
            return False
        elif curlret and not verbose and output:
            logger.error("%s", output)

        if os.path.exists(tmpPath):
            if curlret == 0 and self._IsValidBundle(tmpPath, quiet):
                platform_utils.rename(tmpPath, dstPath)
                return True
            else:
                platform_utils.remove(tmpPath)
                return False
        else:
            return False
def _IsValidBundle(self, path, quiet):
try:
with open(path, "rb") as f:
if f.read(16) == b"# v2 git bundle\n":
return True
else:
if not quiet:
logger.error("Invalid clone.bundle file; ignoring.")
return False
except OSError:
return False
def _Checkout(self, rev, quiet=False):
    """Run `git checkout <rev> --`; raise GitError on failure.

    The error is suppressed when the repository has no refs at all,
    matching the behavior expected during early initialization.
    """
    cmd = ["checkout"] + (["-q"] if quiet else []) + [rev, "--"]
    if GitCommand(self, cmd).Wait() != 0 and self._allrefs:
        raise GitError(
            "%s checkout %s " % (self.name, rev), project=self.name
        )
def _CherryPick(self, rev, ffonly=False, record_origin=False):
    """Cherry-pick |rev| onto the current branch.

    Args:
        rev: The commit to cherry-pick.
        ffonly: Pass --ff to allow fast-forwarding instead of picking.
        record_origin: Pass -x to record the source commit in the message.
    """
    cmd = ["cherry-pick"]
    if ffonly:
        cmd.append("--ff")
    if record_origin:
        cmd.append("-x")
    cmd += [rev, "--"]
    if GitCommand(self, cmd).Wait() != 0 and self._allrefs:
        raise GitError(
            "%s cherry-pick %s " % (self.name, rev), project=self.name
        )
def _LsRemote(self, refs):
    """Return `git ls-remote` output for |refs|, or None on failure."""
    proc = GitCommand(
        self,
        ["ls-remote", self.remote.name, refs],
        capture_stdout=True,
    )
    return proc.stdout if proc.Wait() == 0 else None
def _Revert(self, rev):
    """Revert |rev| on the current branch without opening an editor."""
    cmd = ["revert", "--no-edit", rev, "--"]
    if GitCommand(self, cmd).Wait() != 0 and self._allrefs:
        raise GitError(
            "%s revert %s " % (self.name, rev), project=self.name
        )
def _ResetHard(self, rev, quiet=True):
    """Hard-reset the work tree and index to |rev|."""
    flags = ["-q"] if quiet else []
    cmd = ["reset", "--hard"] + flags + [rev]
    if GitCommand(self, cmd).Wait() != 0:
        raise GitError(
            "%s reset --hard %s " % (self.name, rev), project=self.name
        )
def _SyncSubmodules(self, quiet=True):
    """Initialize and update all submodules recursively."""
    cmd = ["submodule", "update", "--init", "--recursive"]
    if quiet:
        cmd += ["-q"]
    if GitCommand(self, cmd).Wait() != 0:
        raise GitError(
            "%s submodule update --init --recursive " % self.name,
            project=self.name,
        )
def _Rebase(self, upstream, onto=None):
    """Rebase the current branch onto |upstream| (or --onto |onto|)."""
    cmd = ["rebase"]
    if onto is not None:
        cmd += ["--onto", onto]
    cmd += [upstream]
    if GitCommand(self, cmd).Wait() != 0:
        raise GitError(
            "%s rebase %s " % (self.name, upstream), project=self.name
        )
def _FastForward(self, head, ffonly=False):
    """Merge |head| into the current branch, optionally --ff-only."""
    cmd = ["merge", "--no-stat", head] + (["--ff-only"] if ffonly else [])
    if GitCommand(self, cmd).Wait() != 0:
        raise GitError(
            "%s merge %s " % (self.name, head), project=self.name
        )
def _InitGitDir(self, mirror_git=None, force_sync=False, quiet=False):
    """Create and configure the bare git dirs for this project.

    Initializes objdir (object store) and gitdir (per-checkout state),
    wires gitdir to share objdir's contents, optionally points the
    object store at a local reference/mirror via objects/info/alternates,
    and seeds the project git config. On any failure, directories this
    call created are removed before re-raising.
    """
    init_git_dir = not os.path.exists(self.gitdir)
    init_obj_dir = not os.path.exists(self.objdir)
    try:
        # Initialize the bare repository, which contains all of the objects.
        if init_obj_dir:
            os.makedirs(self.objdir)
            self.bare_objdir.init()

            self._UpdateHooks(quiet=quiet)

            if self.use_git_worktrees:
                # Enable per-worktree config file support if possible. This
                # is more a nice-to-have feature for users rather than a
                # hard requirement.
                if git_require((2, 20, 0)):
                    self.EnableRepositoryExtension("worktreeConfig")

        # If we have a separate directory to hold refs, initialize it as
        # well.
        if self.objdir != self.gitdir:
            if init_git_dir:
                os.makedirs(self.gitdir)

            if init_obj_dir or init_git_dir:
                self._ReferenceGitDir(
                    self.objdir, self.gitdir, copy_all=True
                )
            try:
                self._CheckDirReference(self.objdir, self.gitdir)
            except GitError as e:
                # With --force-sync, blow away the broken checkout and
                # retry once (force_sync=False prevents looping forever).
                if force_sync:
                    logger.error(
                        "Retrying clone after deleting %s", self.gitdir
                    )
                    try:
                        platform_utils.rmtree(
                            platform_utils.realpath(self.gitdir)
                        )
                        if self.worktree and os.path.exists(
                            platform_utils.realpath(self.worktree)
                        ):
                            platform_utils.rmtree(
                                platform_utils.realpath(self.worktree)
                            )
                        return self._InitGitDir(
                            mirror_git=mirror_git,
                            force_sync=False,
                            quiet=quiet,
                        )
                    except Exception:
                        raise e
                raise e

        if init_git_dir:
            mp = self.manifest.manifestProject
            ref_dir = mp.reference or ""

            def _expanded_ref_dirs():
                """Iterate through possible git reference dir paths."""
                name = self.name + ".git"
                yield mirror_git or os.path.join(ref_dir, name)
                for prefix in "", self.remote.name:
                    yield os.path.join(
                        ref_dir, ".repo", "project-objects", prefix, name
                    )
                    yield os.path.join(
                        ref_dir, ".repo", "worktrees", prefix, name
                    )

            if ref_dir or mirror_git:
                # Use the first candidate reference dir that exists.
                found_ref_dir = None
                for path in _expanded_ref_dirs():
                    if os.path.exists(path):
                        found_ref_dir = path
                        break
                ref_dir = found_ref_dir

                if ref_dir:
                    if not os.path.isabs(ref_dir):
                        # The alternate directory is relative to the object
                        # database.
                        ref_dir = os.path.relpath(
                            ref_dir, os.path.join(self.objdir, "objects")
                        )
                    _lwrite(
                        os.path.join(
                            self.objdir, "objects/info/alternates"
                        ),
                        os.path.join(ref_dir, "objects") + "\n",
                    )

            # Copy author identity from the manifest project's config.
            m = self.manifest.manifestProject.config
            for key in ["user.name", "user.email"]:
                if m.Has(key, include_defaults=False):
                    self.config.SetString(key, m.GetString(key))
            if not self.manifest.EnableGitLfs:
                # Neutralize git-lfs filters when LFS is disabled.
                self.config.SetString(
                    "filter.lfs.smudge", "git-lfs smudge --skip -- %f"
                )
                self.config.SetString(
                    "filter.lfs.process", "git-lfs filter-process --skip"
                )
            self.config.SetBoolean(
                "core.bare", True if self.manifest.IsMirror else None
            )
    except Exception:
        # Only clean up what this call created.
        if init_obj_dir and os.path.exists(self.objdir):
            platform_utils.rmtree(self.objdir)
        if init_git_dir and os.path.exists(self.gitdir):
            platform_utils.rmtree(self.gitdir)
        raise
def _UpdateHooks(self, quiet=False):
if os.path.exists(self.objdir):
self._InitHooks(quiet=quiet)
def _InitHooks(self, quiet=False):
    """Install repo's stock git hooks into this project's hooks dir.

    Hooks are symlinked from repo's hook templates (hardlinked where the
    OS forbids symlinks). Existing links and locally modified hooks are
    left untouched.
    """
    hooks = platform_utils.realpath(os.path.join(self.objdir, "hooks"))
    if not os.path.exists(hooks):
        os.makedirs(hooks)

    # Delete sample hooks. They're noise.
    for hook in glob.glob(os.path.join(hooks, "*.sample")):
        try:
            platform_utils.remove(hook, missing_ok=True)
        except PermissionError:
            pass

    for stock_hook in _ProjectHooks():
        name = os.path.basename(stock_hook)

        if (
            name in ("commit-msg",)
            and not self.remote.review
            and self is not self.manifest.manifestProject
        ):
            # Don't install a Gerrit Code Review hook if this
            # project does not appear to use it for reviews.
            #
            # Since the manifest project is one of those, but also
            # managed through gerrit, it's excluded.
            continue

        dst = os.path.join(hooks, name)
        if platform_utils.islink(dst):
            # Already linked to the stock hook (or intentionally linked
            # elsewhere); leave it.
            continue
        if os.path.exists(dst):
            # If the files are the same, we'll leave it alone. We create
            # symlinks below by default but fallback to hardlinks if the OS
            # blocks them. So if we're here, it's probably because we made a
            # hardlink below.
            if not filecmp.cmp(stock_hook, dst, shallow=False):
                if not quiet:
                    logger.warning(
                        "warn: %s: Not replacing locally modified %s hook",
                        self.RelPath(local=False),
                        name,
                    )
            continue
        try:
            platform_utils.symlink(
                os.path.relpath(stock_hook, os.path.dirname(dst)), dst
            )
        except OSError as e:
            if e.errno == errno.EPERM:
                # Symlinks not permitted (common on Windows); fall back
                # to a hardlink.
                try:
                    os.link(stock_hook, dst)
                except OSError:
                    raise GitError(
                        self._get_symlink_error_message(), project=self.name
                    )
            else:
                raise
def _InitRemote(self):
if self.remote.url:
remote = self.GetRemote()
remote.url = self.remote.url
remote.pushUrl = self.remote.pushUrl
remote.review = self.remote.review
remote.projectname = self.name
if self.worktree:
remote.ResetFetch(mirror=False)
else:
remote.ResetFetch(mirror=True)
remote.Save()
def _InitMRef(self):
    """Initialize the pseudo m/<manifest branch> ref."""
    if self.manifest.branch:
        if self.use_git_worktrees:
            # Set up the m/ space to point to the worktree-specific ref
            # space. We'll update the worktree-specific ref space on each
            # checkout.
            ref = R_M + self.manifest.branch
            if not self.bare_ref.symref(ref):
                self.bare_git.symbolic_ref(
                    "-m",
                    "redirecting to worktree scope",
                    ref,
                    R_WORKTREE_M + self.manifest.branch,
                )

            # We can't update this ref with git worktrees until it exists.
            # We'll wait until the initial checkout to set it.
            if not os.path.exists(self.worktree):
                return

            base = R_WORKTREE_M
            active_git = self.work_git

            self._InitAnyMRef(HEAD, self.bare_git, detach=True)
        else:
            base = R_M
            active_git = self.bare_git

        self._InitAnyMRef(base + self.manifest.branch, active_git)
def _InitMirrorHead(self):
    """Point a mirror's HEAD at the manifest revision via _InitAnyMRef."""
    self._InitAnyMRef(HEAD, self.bare_git)
def _InitAnyMRef(self, ref, active_git, detach=False):
    """Initialize |ref| in |active_git| to the value in the manifest.

    This points |ref| to the <project> setting in the manifest.

    Args:
        ref: The branch to update.
        active_git: The git repository to make updates in.
        detach: Whether to update target of symbolic refs, or overwrite the
            ref directly (and thus make it non-symbolic).
    """
    cur = self.bare_ref.symref(ref)

    if self.revisionId:
        # Manifest pins an exact commit: detach the ref onto it, but
        # only when it isn't already there.
        if cur != "" or self.bare_ref.get(ref) != self.revisionId:
            msg = "manifest set to %s" % self.revisionId
            dst = self.revisionId + "^0"
            active_git.UpdateRef(ref, dst, message=msg, detach=True)
    else:
        # Manifest names a branch: point the ref at its local tracking
        # ref, symbolically unless |detach| is requested.
        remote = self.GetRemote()
        dst = remote.ToLocal(self.revisionExpr)
        if cur != dst:
            msg = "manifest set to %s" % self.revisionExpr
            if detach:
                active_git.UpdateRef(ref, dst, message=msg, detach=True)
            else:
                active_git.symbolic_ref("-m", msg, ref, dst)
def _CheckDirReference(self, srcdir, destdir):
    """Verify destdir's shared dirs actually link back into srcdir.

    Raises:
        GitError: If any shareable dir in |destdir| resolves somewhere
            other than its counterpart in |srcdir| (stale/foreign links).
    """
    # Git worktrees don't use symlinks to share at all.
    if self.use_git_worktrees:
        return

    for name in self.shareable_dirs:
        # Try to self-heal a bit in simple cases.
        dst_path = os.path.join(destdir, name)
        src_path = os.path.join(srcdir, name)

        dst = platform_utils.realpath(dst_path)
        if os.path.lexists(dst):
            src = platform_utils.realpath(src_path)
            # Fail if the links are pointing to the wrong place.
            if src != dst:
                logger.error(
                    "error: %s is different in %s vs %s",
                    name,
                    destdir,
                    srcdir,
                )
                raise GitError(
                    "--force-sync not enabled; cannot overwrite a local "
                    "work tree. If you're comfortable with the "
                    "possibility of losing the work tree's git metadata,"
                    " use `repo sync --force-sync {0}` to "
                    "proceed.".format(self.RelPath(local=False)),
                    project=self.name,
                )
def _ReferenceGitDir(self, gitdir, dotgit, copy_all):
    """Update |dotgit| to reference |gitdir|, using symlinks where possible.

    Args:
        gitdir: The bare git repository. Must already be initialized.
        dotgit: The repository you would like to initialize.
        copy_all: If true, copy all remaining files from |gitdir| ->
            |dotgit|. This saves you the effort of initializing |dotgit|
            yourself.
    """
    symlink_dirs = self.shareable_dirs[:]
    to_symlink = symlink_dirs

    to_copy = []
    if copy_all:
        to_copy = platform_utils.listdir(gitdir)

    dotgit = platform_utils.realpath(dotgit)
    for name in set(to_copy).union(to_symlink):
        try:
            src = platform_utils.realpath(os.path.join(gitdir, name))
            dst = os.path.join(dotgit, name)

            # Never clobber anything already present in dotgit.
            if os.path.lexists(dst):
                continue

            # If the source dir doesn't exist, create an empty dir.
            if name in symlink_dirs and not os.path.lexists(src):
                os.makedirs(src)

            if name in to_symlink:
                platform_utils.symlink(
                    os.path.relpath(src, os.path.dirname(dst)), dst
                )
            elif copy_all and not platform_utils.islink(dst):
                if platform_utils.isdir(src):
                    shutil.copytree(src, dst)
                elif os.path.isfile(src):
                    shutil.copy(src, dst)
        except OSError as e:
            if e.errno == errno.EPERM:
                # Filesystem forbids symlinks (e.g. some Windows setups).
                raise DownloadError(self._get_symlink_error_message())
            else:
                raise
def _InitGitWorktree(self):
    """Init the project using git worktrees."""
    self.bare_git.worktree("prune")
    self.bare_git.worktree(
        "add",
        "-ff",
        "--checkout",
        "--detach",
        "--lock",
        self.worktree,
        self.GetRevisionId(),
    )

    # Rewrite the internal state files to use relative paths between the
    # checkouts & worktrees.
    dotgit = os.path.join(self.worktree, ".git")
    with open(dotgit) as fp:
        # Figure out the checkout->worktree path.
        setting = fp.read()
        assert setting.startswith("gitdir:")
        git_worktree_path = setting.split(":", 1)[1].strip()

    # Some platforms (e.g. Windows) won't let us update dotgit in situ
    # because of file permissions. Delete it and recreate it from scratch
    # to avoid.
    platform_utils.remove(dotgit)
    # Use relative path from checkout->worktree & maintain Unix line endings
    # on all OS's to match git behavior.
    with open(dotgit, "w", newline="\n") as fp:
        print(
            "gitdir:",
            os.path.relpath(git_worktree_path, self.worktree),
            file=fp,
        )
    # Use relative path from worktree->checkout & maintain Unix line endings
    # on all OS's to match git behavior.
    with open(
        os.path.join(git_worktree_path, "gitdir"), "w", newline="\n"
    ) as fp:
        print(os.path.relpath(dotgit, git_worktree_path), file=fp)

    self._InitMRef()
def _InitWorkTree(self, force_sync=False, submodules=False):
    """Setup the worktree .git path.

    This is the user-visible path like src/foo/.git/.

    With non-git-worktrees, this will be a symlink to the .repo/projects/
    path. With git-worktrees, this will be a .git file using "gitdir: ..."
    syntax.

    Older checkouts had .git/ directories. If we see that, migrate it.

    This also handles changes in the manifest. Maybe this project was
    backed by "foo/bar" on the server, but now it's "new/foo/bar". We have
    to update the path we point to under .repo/projects/ to match.
    """
    dotgit = os.path.join(self.worktree, ".git")

    # If using an old layout style (a directory), migrate it.
    if not platform_utils.islink(dotgit) and platform_utils.isdir(dotgit):
        self._MigrateOldWorkTreeGitDir(dotgit, project=self.name)

    init_dotgit = not os.path.exists(dotgit)
    if self.use_git_worktrees:
        if init_dotgit:
            self._InitGitWorktree()
            self._CopyAndLinkFiles()
    else:
        if not init_dotgit:
            # See if the project has changed.
            if platform_utils.realpath(
                self.gitdir
            ) != platform_utils.realpath(dotgit):
                # Stale link from a previous manifest layout; replace it.
                platform_utils.remove(dotgit)

        if init_dotgit or not os.path.exists(dotgit):
            os.makedirs(self.worktree, exist_ok=True)
            platform_utils.symlink(
                os.path.relpath(self.gitdir, self.worktree), dotgit
            )

        if init_dotgit:
            _lwrite(
                os.path.join(dotgit, HEAD), "%s\n" % self.GetRevisionId()
            )

            # Finish checking out the worktree.
            cmd = ["read-tree", "--reset", "-u", "-v", HEAD]
            if GitCommand(self, cmd).Wait() != 0:
                raise GitError(
                    "Cannot initialize work tree for " + self.name,
                    project=self.name,
                )

            if submodules:
                self._SyncSubmodules(quiet=True)
            self._CopyAndLinkFiles()
@classmethod
def _MigrateOldWorkTreeGitDir(cls, dotgit, project=None):
    """Migrate the old worktree .git/ dir style to a symlink.

    This logic specifically only uses state from |dotgit| to figure out
    where to move content and not |self|. This way if the backing project
    also changed places, we only do the .git/ dir to .git symlink migration
    here. The path updates will happen independently.
    """
    # Figure out where in .repo/projects/ it's pointing to.
    if not os.path.islink(os.path.join(dotgit, "refs")):
        raise GitError(
            f"{dotgit}: unsupported checkout state", project=project
        )
    gitdir = os.path.dirname(os.path.realpath(os.path.join(dotgit, "refs")))

    # Remove known symlink paths that exist in .repo/projects/.
    KNOWN_LINKS = {
        "config",
        "description",
        "hooks",
        "info",
        "logs",
        "objects",
        "packed-refs",
        "refs",
        "rr-cache",
        "shallow",
        "svn",
    }
    # Paths that we know will be in both, but are safe to clobber in
    # .repo/projects/.
    SAFE_TO_CLOBBER = {
        "COMMIT_EDITMSG",
        "FETCH_HEAD",
        "HEAD",
        "gc.log",
        "gitk.cache",
        "index",
        "ORIG_HEAD",
    }

    # First see if we'd succeed before starting the migration.
    unknown_paths = []
    for name in platform_utils.listdir(dotgit):
        # Ignore all temporary/backup names. These are common with vim &
        # emacs.
        if name.endswith("~") or (name[0] == "#" and name[-1] == "#"):
            continue

        dotgit_path = os.path.join(dotgit, name)
        if name in KNOWN_LINKS:
            if not platform_utils.islink(dotgit_path):
                unknown_paths.append(f"{dotgit_path}: should be a symlink")
        else:
            gitdir_path = os.path.join(gitdir, name)
            if name not in SAFE_TO_CLOBBER and os.path.exists(gitdir_path):
                unknown_paths.append(
                    f"{dotgit_path}: unknown file; please file a bug"
                )
    if unknown_paths:
        # Abort before touching anything so a failed precheck leaves the
        # checkout untouched.
        raise GitError(
            "Aborting migration: " + "\n".join(unknown_paths),
            project=project,
        )

    # Now walk the paths and sync the .git/ to .repo/projects/.
    for name in platform_utils.listdir(dotgit):
        dotgit_path = os.path.join(dotgit, name)

        # Ignore all temporary/backup names. These are common with vim &
        # emacs.
        if name.endswith("~") or (name[0] == "#" and name[-1] == "#"):
            platform_utils.remove(dotgit_path)
        elif name in KNOWN_LINKS:
            platform_utils.remove(dotgit_path)
        else:
            gitdir_path = os.path.join(gitdir, name)
            platform_utils.remove(gitdir_path, missing_ok=True)
            platform_utils.rename(dotgit_path, gitdir_path)

    # Now that the dir should be empty, clear it out, and symlink it over.
    platform_utils.rmdir(dotgit)
    platform_utils.symlink(
        os.path.relpath(gitdir, os.path.dirname(os.path.realpath(dotgit))),
        dotgit,
    )
def _get_symlink_error_message(self):
    """Return a human-readable explanation for a failed symlink creation."""
    if not platform_utils.isWindows():
        return "filesystem must support symlinks"
    # Windows requires elevation (or developer mode) to create symlinks.
    return (
        "Unable to create symbolic link. Please re-run the command as "
        "Administrator, or see "
        "https://github.com/git-for-windows/git/wiki/Symbolic-Links "
        "for other options."
    )
def _revlist(self, *args, **kw):
a = []
a.extend(args)
a.append("--")
return self.work_git.rev_list(*a, **kw)
@property
def _allrefs(self):
# Snapshot of every ref known to the bare repository's ref cache.
return self.bare_ref.all
def _getLogs(
self, rev1, rev2, oneline=False, color=True, pretty_format=None
):
"""Get logs between two revisions of this project."""
comp = ".."
# NOTE(review): revs (and cmd, depending on the original nesting) are
# only assigned when rev1 is truthy; callers appear to always pass a
# valid rev1 — confirm before calling with a falsy rev1.
if rev1:
revs = [rev1]
if rev2:
# Build a "rev1..rev2" range expression for git log.
revs.extend([comp, rev2])
cmd = ["log", "".join(revs)]
out = DiffColoring(self.config)
if out.is_on and color:
cmd.append("--color")
if pretty_format is not None:
cmd.append("--pretty=format:%s" % pretty_format)
if oneline:
cmd.append("--oneline")
try:
log = GitCommand(
self, cmd, capture_stdout=True, capture_stderr=True
)
if log.Wait() == 0:
return log.stdout
except GitError:
# worktree may not exist if groups changed for example. In that
# case, try in gitdir instead.
if not os.path.exists(self.worktree):
# Re-run `git log` (minus the leading "log") in the bare gitdir.
return self.bare_git.log(*cmd[1:])
else:
raise
# Non-zero exit status without a GitError: no output available.
return None
def getAddedAndRemovedLogs(
    self, toProject, oneline=False, color=True, pretty_format=None
):
    """Return logs in both directions between this project and |toProject|.

    "added" holds the log from our revision to toProject's revision;
    "removed" holds the reverse direction.
    """
    my_rev = self.GetRevisionId(self._allrefs)
    their_rev = toProject.GetRevisionId(toProject._allrefs)
    options = dict(oneline=oneline, color=color, pretty_format=pretty_format)
    return {
        "added": self._getLogs(my_rev, their_rev, **options),
        "removed": self._getLogs(their_rev, my_rev, **options),
    }
class _GitGetByExec:
# Runs git subcommands for a project by exec'ing git, either against
# the bare gitdir or the worktree checkout.
def __init__(self, project, bare, gitdir):
self._project = project
self._bare = bare
self._gitdir = gitdir
# __getstate__ and __setstate__ are required for pickling because
# __getattr__ exists.
def __getstate__(self):
return (self._project, self._bare, self._gitdir)
def __setstate__(self, state):
self._project, self._bare, self._gitdir = state
def LsOthers(self):
    """Return untracked, non-ignored paths in the worktree.

    Uses `git ls-files -z`, so paths are NUL-delimited and safe for any
    filename characters.
    """
    proc = GitCommand(
        self._project,
        ["ls-files", "-z", "--others", "--exclude-standard"],
        bare=False,
        gitdir=self._gitdir,
        capture_stdout=True,
        capture_stderr=True,
    )
    if proc.Wait() != 0:
        return []
    raw = proc.stdout
    if not raw:
        return []
    # Output is NUL-terminated; drop the trailing NUL before splitting.
    return raw[:-1].split("\0")
def DiffZ(self, name, *args):
    """Run a NUL-delimited diff command (`git <name> -z`) and parse it.

    Args:
        name: the git subcommand to run (e.g. "diff-index").
        *args: extra arguments appended to the command line.

    Returns:
        dict mapping destination path -> _Info record describing the
        change (old/new modes, old/new blob ids, status letter, and
        src_path for renames/copies).
    """

    # Hoisted out of the parsing loop: the original redefined this class
    # on every iteration, which is wasteful and obscures its role.
    class _Info:
        """One parsed `git diff -z` raw status record."""

        def __init__(self, path, omode, nmode, oid, nid, state):
            self.path = path
            self.src_path = None
            self.old_mode = omode
            self.new_mode = nmode
            self.old_id = oid
            self.new_id = nid
            if len(state) == 1:
                self.status = state
                self.level = None
            else:
                # Status letter followed by a similarity score (e.g.
                # "R086"); strip leading zeros from the score.
                self.status = state[:1]
                self.level = state[1:]
                while self.level.startswith("0"):
                    self.level = self.level[1:]

    cmd = [name, "-z", "--ignore-submodules"]
    cmd.extend(args)
    p = GitCommand(
        self._project,
        cmd,
        gitdir=self._gitdir,
        bare=False,
        capture_stdout=True,
        capture_stderr=True,
    )
    p.Wait()
    results = {}
    out = p.stdout
    if out:
        fields = iter(out[:-1].split("\0"))
        # Records arrive as (info, path) pairs; renames/copies carry an
        # extra destination-path field.  The original wrote `while out:`
        # which is always true for an iterator — this is the real loop.
        while True:
            try:
                info = next(fields)
                path = next(fields)
            except StopIteration:
                break
            # info looks like ":<omode> <nmode> <oid> <nid> <status>".
            info = _Info(path, *info[1:].split(" "))
            if info.status in ("R", "C"):
                info.src_path = info.path
                info.path = next(fields)
            results[info.path] = info
    return results
def GetDotgitPath(self, subpath=None):
    """Return the full path to the .git dir.

    As a convenience, append |subpath| if provided.
    """
    if self._bare:
        dotgit = self._gitdir
    else:
        dotgit = os.path.join(self._project.worktree, ".git")
        if os.path.isfile(dotgit):
            # A .git *file* means a linked worktree: it holds a
            # "gitdir: <path>" pointer to the real scratch space.
            with open(dotgit) as pointer:
                content = pointer.read()
            assert content.startswith("gitdir:")
            target = content.split(":", 1)[1].strip()
            dotgit = os.path.normpath(
                os.path.join(self._project.worktree, target)
            )
    if subpath is None:
        return dotgit
    return os.path.join(dotgit, subpath)
def GetHead(self):
    """Return the ref that HEAD points to."""
    path = self.GetDotgitPath(subpath=HEAD)
    try:
        with open(path) as fd:
            line = fd.readline()
    except OSError as e:
        raise NoManifestException(path, str(e))
    try:
        # Python 2 compatibility remnant: decode if we got bytes.
        line = line.decode()
    except AttributeError:
        pass
    prefix = "ref: "
    # In both branches, chop the trailing newline.
    if line.startswith(prefix):
        return line[len(prefix):-1]
    return line[:-1]
def SetHead(self, ref, message=None):
    """Point symbolic HEAD at |ref|, with an optional reflog message."""
    args = [] if message is None else ["-m", message]
    args += [HEAD, ref]
    self.symbolic_ref(*args)
def DetachHead(self, new, message=None):
    """Detach HEAD directly to commit |new| (optional reflog message)."""
    args = ["--no-deref"]
    if message is not None:
        args += ["-m", message]
    args += [HEAD, new]
    self.update_ref(*args)
def UpdateRef(self, name, new, old=None, message=None, detach=False):
    """Set ref |name| to |new|, verifying |old| if supplied.

    |message| becomes the reflog entry; |detach| passes --no-deref so a
    symbolic ref is overwritten rather than followed.
    """
    args = []
    if message is not None:
        args += ["-m", message]
    if detach:
        args.append("--no-deref")
    args += [name, new]
    if old is not None:
        args.append(old)
    self.update_ref(*args)
def DeleteRef(self, name, old=None):
    """Delete ref |name|, resolving |old| if not given, and update the
    project's ref cache."""
    old = old or self.rev_parse(name)
    self.update_ref("-d", name, old)
    self._project.bare_ref.deleted(name)
def rev_list(self, *args, log_as_error=True, **kw):
    """Run `git rev-list` (or `git log --pretty` when a |format| keyword
    is supplied) and return the output split into lines."""
    if "format" in kw:
        base = ["log", "--pretty=format:%s" % kw["format"]]
    else:
        base = ["rev-list"]
    proc = GitCommand(
        self._project,
        base + list(args),
        bare=self._bare,
        gitdir=self._gitdir,
        capture_stdout=True,
        capture_stderr=True,
        verify_command=True,
        log_as_error=log_as_error,
    )
    proc.Wait()
    return proc.stdout.splitlines()
def __getattr__(self, name):
    """Allow arbitrary git commands using pythonic syntax.

    Example: `git_obj.rev_parse('HEAD')` — there is no `rev_parse`
    method, so this hook runs, turns the '_' into '-', and returns a
    callable that executes that git subcommand.

    Args:
        name: git subcommand to run; '_' characters become '-'.

    Keyword arguments supported by the returned callable:
        config: optional dict of git config options passed via '-c'.

    Returns:
        A callable that runs git with the named command.
    """
    name = name.replace("_", "-")

    def runner(*args, log_as_error=True, **kwargs):
        config = kwargs.pop("config", None)
        # Reject any keyword other than the ones handled above.
        for k in kwargs:
            raise TypeError(
                "%s() got an unexpected keyword argument %r" % (name, k)
            )
        cmdv = []
        if config is not None:
            for key, value in config.items():
                cmdv.extend(["-c", "%s=%s" % (key, value)])
        cmdv.append(name)
        cmdv.extend(args)
        proc = GitCommand(
            self._project,
            cmdv,
            bare=self._bare,
            gitdir=self._gitdir,
            capture_stdout=True,
            capture_stderr=True,
            verify_command=True,
            log_as_error=log_as_error,
        )
        proc.Wait()
        out = proc.stdout
        # Strip the trailing newline, but only when it is the sole one
        # (single-line output); multi-line output is returned verbatim.
        if out.endswith("\n") and out.index("\n") == len(out) - 1:
            return out[:-1]
        return out

    return runner
class LocalSyncFail(RepoError):
"""Default error when there is a Sync_LocalHalf error."""
class _PriorSyncFailedError(LocalSyncFail):
# Reported when an earlier sync left a rebase still in progress.
def __str__(self):
return "prior sync failed; rebase still in progress"
class _DirtyError(LocalSyncFail):
# Reported when the worktree has uncommitted changes blocking sync.
def __str__(self):
return "contains uncommitted changes"
class _InfoMessage:
def __init__(self, project, text):
self.project = project
self.text = text
def Print(self, syncbuf):
syncbuf.out.info(
"%s/: %s", self.project.RelPath(local=False), self.text
)
syncbuf.out.nl()
class _Failure:
def __init__(self, project, why):
self.project = project
self.why = why
def Print(self, syncbuf):
syncbuf.out.fail(
"error: %s/: %s", self.project.RelPath(local=False), str(self.why)
)
syncbuf.out.nl()
class _Later:
def __init__(self, project, action):
self.project = project
self.action = action
def Run(self, syncbuf):
out = syncbuf.out
out.project("project %s/", self.project.RelPath(local=False))
out.nl()
try:
self.action()
out.nl()
return True
except GitError:
out.nl()
return False
class _SyncColoring(Coloring):
# Colored output channels used while printing `repo sync` status.
def __init__(self, config):
super().__init__(config, "reposync")
self.project = self.printer("header", attr="bold")
self.info = self.printer("info")
self.fail = self.printer("fail", fg="red")
class SyncBuffer:
    """Accumulates per-project messages, failures and deferred actions
    during a sync, then flushes them to stderr in one pass."""

    def __init__(self, config, detach_head=False):
        self._messages = []
        self._failures = []
        self._later_queue1 = []
        self._later_queue2 = []
        self.out = _SyncColoring(config)
        self.out.redirect(sys.stderr)
        self.detach_head = detach_head
        self.clean = True
        self.recent_clean = True

    def info(self, project, fmt, *args):
        """Queue an informational message for |project|."""
        self._messages.append(_InfoMessage(project, fmt % args))

    def fail(self, project, err=None):
        """Queue a failure for |project| and mark the buffer unclean."""
        self._failures.append(_Failure(project, err))
        self._MarkUnclean()

    def later1(self, project, what):
        """Queue |what| on the first deferred-action queue."""
        self._later_queue1.append(_Later(project, what))

    def later2(self, project, what):
        """Queue |what| on the second deferred-action queue."""
        self._later_queue2.append(_Later(project, what))

    def Finish(self):
        """Flush messages, run deferred actions, report cleanliness."""
        self._PrintMessages()
        self._RunLater()
        self._PrintMessages()
        return self.clean

    def Recently(self):
        """Return whether everything since the last call was clean, and
        reset the recent-clean flag."""
        was_clean, self.recent_clean = self.recent_clean, True
        return was_clean

    def _MarkUnclean(self):
        self.clean = False
        self.recent_clean = False

    def _RunLater(self):
        # Queue 2 only runs if queue 1 finished without failure.
        for queue_name in ("_later_queue1", "_later_queue2"):
            if not self._RunQueue(queue_name):
                return

    def _RunQueue(self, queue):
        """Run every deferred action in |queue|; stop on first failure."""
        for item in getattr(self, queue):
            if not item.Run(self):
                self._MarkUnclean()
                return False
        setattr(self, queue, [])
        return True

    def _PrintMessages(self):
        if not (self._messages or self._failures):
            return
        if os.isatty(2):
            # Erase any in-progress progress line on the terminal.
            self.out.write(progress.CSI_ERASE_LINE)
            self.out.write("\r")
        for msg in self._messages:
            msg.Print(self)
        for msg in self._failures:
            msg.Print(self)
        self._messages = []
        self._failures = []
class MetaProject(Project):
    """A special project housed under .repo."""

    def __init__(self, manifest, name, gitdir, worktree):
        Project.__init__(
            self,
            manifest=manifest,
            name=name,
            gitdir=gitdir,
            objdir=gitdir,
            worktree=worktree,
            remote=RemoteSpec("origin"),
            relpath=".repo/%s" % name,
            revisionExpr="refs/heads/master",
            revisionId=None,
            groups=None,
        )

    def PreSync(self):
        """Adopt the current branch's merge target before syncing."""
        if not self.Exists:
            return
        branch = self.CurrentBranch
        if not branch:
            return
        merge_target = self.GetBranch(branch).merge
        if merge_target:
            self.revisionExpr = merge_target
            self.revisionId = None

    @property
    def HasChanges(self):
        """Has the remote received new commits not yet checked out?"""
        if not self.remote or not self.revisionExpr:
            return False
        all_refs = self.bare_ref.all
        revid = self.GetRevisionId(all_refs)
        head = self.work_git.GetHead()
        if head.startswith(R_HEADS):
            # HEAD is a local branch; translate it to its commit id.
            try:
                head = all_refs[head]
            except KeyError:
                head = None
        if revid == head:
            return False
        if self._revlist(not_rev(HEAD), revid):
            return True
        return False
class RepoProject(MetaProject):
    """The MetaProject for repo itself."""

    @property
    def LastFetch(self):
        """mtime of FETCH_HEAD, or 0 if it cannot be stat'ed."""
        fetch_head = os.path.join(self.gitdir, "FETCH_HEAD")
        try:
            return os.path.getmtime(fetch_head)
        except OSError:
            return 0
class ManifestProject(MetaProject):
    """The MetaProject for manifests."""

    def MetaBranchSwitch(self, submodules=False):
        """Prepare for manifest branch switch."""
        # Detach and delete the local manifest branch so a new branch
        # can take over cleanly.
        syncbuf = SyncBuffer(self.config, detach_head=True)
        self.Sync_LocalHalf(syncbuf, submodules=submodules)
        syncbuf.Finish()
        cmd = GitCommand(
            self,
            ["update-ref", "-d", "refs/heads/default"],
            capture_stdout=True,
            capture_stderr=True,
        )
        return cmd.Wait() == 0
# Read-only accessors over the settings that `repo init` persisted into
# the manifest project's git config.
@property
def standalone_manifest_url(self):
"""The URL of the standalone manifest, or None."""
return self.config.GetString("manifest.standalone")
@property
def manifest_groups(self):
"""The manifest groups string."""
return self.config.GetString("manifest.groups")
@property
def reference(self):
"""The --reference for this manifest."""
return self.config.GetString("repo.reference")
@property
def dissociate(self):
"""Whether to dissociate."""
return self.config.GetBoolean("repo.dissociate")
@property
def archive(self):
"""Whether we use archive."""
return self.config.GetBoolean("repo.archive")
@property
def mirror(self):
"""Whether we use mirror."""
return self.config.GetBoolean("repo.mirror")
@property
def use_worktree(self):
"""Whether we use worktree."""
return self.config.GetBoolean("repo.worktree")
@property
def clone_bundle(self):
"""Whether we use clone_bundle."""
return self.config.GetBoolean("repo.clonebundle")
@property
def submodules(self):
"""Whether we use submodules."""
return self.config.GetBoolean("repo.submodules")
@property
def git_lfs(self):
"""Whether we use git_lfs."""
return self.config.GetBoolean("repo.git-lfs")
@property
def use_superproject(self):
"""Whether we use superproject."""
return self.config.GetBoolean("repo.superproject")
@property
def partial_clone(self):
"""Whether this is a partial clone."""
return self.config.GetBoolean("repo.partialclone")
@property
def depth(self):
"""Partial clone depth."""
return self.config.GetInt("repo.depth")
@property
def clone_filter(self):
"""The clone filter."""
return self.config.GetString("repo.clonefilter")
@property
def partial_clone_exclude(self):
"""Partial clone exclude string"""
return self.config.GetString("repo.partialcloneexclude")
@property
def clone_filter_for_depth(self):
"""Replace shallow clone with partial clone."""
return self.config.GetString("repo.clonefilterfordepth")
@property
def manifest_platform(self):
"""The --platform argument from `repo init`."""
return self.config.GetString("manifest.platform")
@property
def _platform_name(self):
"""Return the name of the platform."""
# Lowercased platform.system(): e.g. "linux", "darwin", "windows".
return platform.system().lower()
def SyncWithPossibleInit(
self,
submanifest,
verbose=False,
current_branch_only=False,
tags="",
git_event_log=None,
):
"""Sync a manifestProject, possibly for the first time.
Call Sync() with arguments from the most recent `repo init`. If this is
a new sub manifest, then inherit options from the parent's
manifestProject.
This is used by subcmds.Sync() to do an initial download of new sub
manifests.
Args:
submanifest: an XmlSubmanifest, the submanifest to re-sync.
verbose: a boolean, whether to show all output, rather than only
errors.
current_branch_only: a boolean, whether to only fetch the current
manifest branch from the server.
tags: a boolean, whether to fetch tags.
git_event_log: an EventLog, for git tracing.
"""
# TODO(lamontjones): when refactoring sync (and init?) consider how to
# better get the init options that we should use for new submanifests
# that are added when syncing an existing workspace.
git_event_log = git_event_log or EventLog()
spec = submanifest.ToSubmanifestSpec()
# Use the init options from the existing manifestProject, or the parent
# if it doesn't exist.
#
# Today, we only support changing manifest_groups on the sub-manifest,
# with no supported-for-the-user way to change the other arguments from
# those specified by the outermost manifest.
#
# TODO(lamontjones): determine which of these should come from the
# outermost manifest and which should come from the parent manifest.
mp = self if self.Exists else submanifest.parent.manifestProject
# Delegate to Sync() with every persisted init option; only the URL,
# branch and manifest name come from the submanifest spec itself.
return self.Sync(
manifest_url=spec.manifestUrl,
manifest_branch=spec.revision,
standalone_manifest=mp.standalone_manifest_url,
groups=mp.manifest_groups,
platform=mp.manifest_platform,
mirror=mp.mirror,
dissociate=mp.dissociate,
reference=mp.reference,
worktree=mp.use_worktree,
submodules=mp.submodules,
archive=mp.archive,
partial_clone=mp.partial_clone,
clone_filter=mp.clone_filter,
partial_clone_exclude=mp.partial_clone_exclude,
clone_bundle=mp.clone_bundle,
git_lfs=mp.git_lfs,
use_superproject=mp.use_superproject,
verbose=verbose,
current_branch_only=current_branch_only,
tags=tags,
depth=mp.depth,
git_event_log=git_event_log,
manifest_name=spec.manifestName,
this_manifest_only=True,
outer_manifest=False,
)
def Sync(
self,
_kwargs_only=(),
manifest_url="",
manifest_branch=None,
standalone_manifest=False,
groups="",
mirror=False,
reference="",
dissociate=False,
worktree=False,
submodules=False,
archive=False,
partial_clone=None,
depth=None,
clone_filter="blob:none",
partial_clone_exclude=None,
clone_bundle=None,
git_lfs=None,
use_superproject=None,
verbose=False,
current_branch_only=False,
git_event_log=None,
platform="",
manifest_name="default.xml",
tags="",
this_manifest_only=False,
outer_manifest=True,
clone_filter_for_depth=None,
):
"""Sync the manifest and all submanifests.
Args:
manifest_url: a string, the URL of the manifest project.
manifest_branch: a string, the manifest branch to use.
standalone_manifest: a boolean, whether to store the manifest as a
static file.
groups: a string, restricts the checkout to projects with the
specified groups.
mirror: a boolean, whether to create a mirror of the remote
repository.
reference: a string, location of a repo instance to use as a
reference.
dissociate: a boolean, whether to dissociate from reference mirrors
after clone.
worktree: a boolean, whether to use git-worktree to manage projects.
submodules: a boolean, whether sync submodules associated with the
manifest project.
archive: a boolean, whether to checkout each project as an archive.
See git-archive.
partial_clone: a boolean, whether to perform a partial clone.
depth: an int, how deep of a shallow clone to create.
clone_filter: a string, filter to use with partial_clone.
partial_clone_exclude : a string, comma-delimeted list of project
names to exclude from partial clone.
clone_bundle: a boolean, whether to enable /clone.bundle on
HTTP/HTTPS.
git_lfs: a boolean, whether to enable git LFS support.
use_superproject: a boolean, whether to use the manifest
superproject to sync projects.
verbose: a boolean, whether to show all output, rather than only
errors.
current_branch_only: a boolean, whether to only fetch the current
manifest branch from the server.
platform: a string, restrict the checkout to projects with the
specified platform group.
git_event_log: an EventLog, for git tracing.
tags: a boolean, whether to fetch tags.
manifest_name: a string, the name of the manifest file to use.
this_manifest_only: a boolean, whether to only operate on the
current sub manifest.
outer_manifest: a boolean, whether to start at the outermost
manifest.
clone_filter_for_depth: a string, when specified replaces shallow
clones with partial.
Returns:
a boolean, whether the sync was successful.
"""
# `_kwargs_only` is a positional sentinel: passing anything
# positionally lands in it and trips this assertion.
assert _kwargs_only == (), "Sync only accepts keyword arguments."
groups = groups or self.manifest.GetDefaultGroupsStr(
with_platform=False
)
platform = platform or "auto"
git_event_log = git_event_log or EventLog()
if outer_manifest and self.manifest.is_submanifest:
# In a multi-manifest checkout, use the outer manifest unless we are
# told not to.
return self.client.outer_manifest.manifestProject.Sync(
manifest_url=manifest_url,
manifest_branch=manifest_branch,
standalone_manifest=standalone_manifest,
groups=groups,
platform=platform,
mirror=mirror,
dissociate=dissociate,
reference=reference,
worktree=worktree,
submodules=submodules,
archive=archive,
partial_clone=partial_clone,
clone_filter=clone_filter,
partial_clone_exclude=partial_clone_exclude,
clone_bundle=clone_bundle,
git_lfs=git_lfs,
use_superproject=use_superproject,
verbose=verbose,
current_branch_only=current_branch_only,
tags=tags,
depth=depth,
git_event_log=git_event_log,
manifest_name=manifest_name,
this_manifest_only=this_manifest_only,
outer_manifest=False,
)
# If repo has already been initialized, we take -u with the absence of
# --standalone-manifest to mean "transition to a standard repo set up",
# which necessitates starting fresh.
# If --standalone-manifest is set, we always tear everything down and
# start anew.
if self.Exists:
was_standalone_manifest = self.config.GetString(
"manifest.standalone"
)
if was_standalone_manifest and not manifest_url:
# NOTE(review): "standlone" below is a typo in the user-facing
# message ("standalone"); left as-is since this edit only adds
# comments.
logger.error(
"fatal: repo was initialized with a standlone manifest, "
"cannot be re-initialized without --manifest-url/-u"
)
return False
if standalone_manifest or (
was_standalone_manifest and manifest_url
):
# Tear down the existing checkout before switching modes.
self.config.ClearCache()
if self.gitdir and os.path.exists(self.gitdir):
platform_utils.rmtree(self.gitdir)
if self.worktree and os.path.exists(self.worktree):
platform_utils.rmtree(self.worktree)
is_new = not self.Exists
if is_new:
if not manifest_url:
logger.error("fatal: manifest url is required.")
return False
if verbose:
print(
"Downloading manifest from %s"
% (GitConfig.ForUser().UrlInsteadOf(manifest_url),),
file=sys.stderr,
)
# The manifest project object doesn't keep track of the path on the
# server where this git is located, so let's save that here.
mirrored_manifest_git = None
if reference:
manifest_git_path = urllib.parse.urlparse(manifest_url).path[1:]
mirrored_manifest_git = os.path.join(
reference, manifest_git_path
)
if not mirrored_manifest_git.endswith(".git"):
mirrored_manifest_git += ".git"
if not os.path.exists(mirrored_manifest_git):
# Fall back to the reference checkout's own manifests.git.
mirrored_manifest_git = os.path.join(
reference, ".repo/manifests.git"
)
self._InitGitDir(mirror_git=mirrored_manifest_git)
# If standalone_manifest is set, mark the project as "standalone" --
# we'll still do much of the manifests.git set up, but will avoid actual
# syncs to a remote.
if standalone_manifest:
self.config.SetString("manifest.standalone", manifest_url)
elif not manifest_url and not manifest_branch:
# If -u is set and --standalone-manifest is not, then we're not in
# standalone mode. Otherwise, use config to infer what we were in
# the last init.
standalone_manifest = bool(
self.config.GetString("manifest.standalone")
)
if not standalone_manifest:
self.config.SetString("manifest.standalone", None)
self._ConfigureDepth(depth)
# Set the remote URL before the remote branch as we might need it below.
if manifest_url:
r = self.GetRemote()
r.url = manifest_url
r.ResetFetch()
r.Save()
if not standalone_manifest:
if manifest_branch:
if manifest_branch == "HEAD":
manifest_branch = self.ResolveRemoteHead()
if manifest_branch is None:
logger.error("fatal: unable to resolve HEAD")
return False
self.revisionExpr = manifest_branch
else:
if is_new:
default_branch = self.ResolveRemoteHead()
if default_branch is None:
# If the remote doesn't have HEAD configured, default to
# master.
default_branch = "refs/heads/master"
self.revisionExpr = default_branch
else:
self.PreSync()
# Normalize groups and fold in the platform group selection.
groups = re.split(r"[,\s]+", groups or "")
all_platforms = ["linux", "darwin", "windows"]
platformize = lambda x: "platform-" + x
if platform == "auto":
if not mirror and not self.mirror:
groups.append(platformize(self._platform_name))
elif platform == "all":
groups.extend(map(platformize, all_platforms))
elif platform in all_platforms:
groups.append(platformize(platform))
elif platform != "none":
# NOTE(review): logger.error() does not accept a `file=` keyword
# (that is a print() argument); this call looks like it would
# raise TypeError if reached — confirm and fix upstream.
logger.error("fatal: invalid platform flag", file=sys.stderr)
return False
self.config.SetString("manifest.platform", platform)
groups = [x for x in groups if x]
groupstr = ",".join(groups)
if (
platform == "auto"
and groupstr == self.manifest.GetDefaultGroupsStr()
):
# Don't persist the default selection; None unsets the key.
groupstr = None
self.config.SetString("manifest.groups", groupstr)
if reference:
self.config.SetString("repo.reference", reference)
if dissociate:
self.config.SetBoolean("repo.dissociate", dissociate)
if worktree:
if mirror:
logger.error("fatal: --mirror and --worktree are incompatible")
return False
if submodules:
logger.error(
"fatal: --submodules and --worktree are incompatible"
)
return False
self.config.SetBoolean("repo.worktree", worktree)
if is_new:
self.use_git_worktrees = True
logger.warning("warning: --worktree is experimental!")
if archive:
if is_new:
self.config.SetBoolean("repo.archive", archive)
else:
logger.error(
"fatal: --archive is only supported when initializing a "
"new workspace."
)
logger.error(
"Either delete the .repo folder in this workspace, or "
"initialize in another location."
)
return False
if mirror:
if is_new:
self.config.SetBoolean("repo.mirror", mirror)
else:
logger.error(
"fatal: --mirror is only supported when initializing a new "
"workspace."
)
logger.error(
"Either delete the .repo folder in this workspace, or "
"initialize in another location."
)
return False
if partial_clone is not None:
if mirror:
logger.error(
"fatal: --mirror and --partial-clone are mutually "
"exclusive"
)
return False
self.config.SetBoolean("repo.partialclone", partial_clone)
if clone_filter:
self.config.SetString("repo.clonefilter", clone_filter)
elif self.partial_clone:
clone_filter = self.clone_filter
else:
clone_filter = None
if partial_clone_exclude is not None:
self.config.SetString(
"repo.partialcloneexclude", partial_clone_exclude
)
if clone_bundle is None:
# Default: clone bundles are redundant when partial clone is on.
clone_bundle = False if partial_clone else True
else:
self.config.SetBoolean("repo.clonebundle", clone_bundle)
if submodules:
self.config.SetBoolean("repo.submodules", submodules)
if git_lfs is not None:
if git_lfs:
git_require((2, 17, 0), fail=True, msg="Git LFS support")
self.config.SetBoolean("repo.git-lfs", git_lfs)
if not is_new:
logger.warning(
"warning: Changing --git-lfs settings will only affect new "
"project checkouts.\n"
" Existing projects will require manual updates.\n"
)
if clone_filter_for_depth is not None:
self.ConfigureCloneFilterForDepth(clone_filter_for_depth)
if use_superproject is not None:
self.config.SetBoolean("repo.superproject", use_superproject)
if not standalone_manifest:
# Network half: fetch the manifest git from the server.
success = self.Sync_NetworkHalf(
is_new=is_new,
quiet=not verbose,
verbose=verbose,
clone_bundle=clone_bundle,
current_branch_only=current_branch_only,
tags=tags,
submodules=submodules,
clone_filter=clone_filter,
partial_clone_exclude=self.manifest.PartialCloneExclude,
clone_filter_for_depth=self.manifest.CloneFilterForDepth,
).success
if not success:
r = self.GetRemote()
logger.error("fatal: cannot obtain manifest %s", r.url)
# Better delete the manifest git dir if we created it; otherwise
# next time (when user fixes problems) we won't go through the
# "is_new" logic.
if is_new:
platform_utils.rmtree(self.gitdir)
return False
if manifest_branch:
self.MetaBranchSwitch(submodules=submodules)
# Local half: check out the fetched manifest.
syncbuf = SyncBuffer(self.config)
self.Sync_LocalHalf(syncbuf, submodules=submodules)
syncbuf.Finish()
if is_new or self.CurrentBranch is None:
try:
self.StartBranch("default")
except GitError as e:
msg = str(e)
logger.error(
"fatal: cannot create default in manifest %s", msg
)
return False
if not manifest_name:
logger.error("fatal: manifest name (-m) is required.")
return False
elif is_new:
# This is a new standalone manifest.
manifest_name = "default.xml"
manifest_data = fetch.fetch_file(manifest_url, verbose=verbose)
dest = os.path.join(self.worktree, manifest_name)
os.makedirs(os.path.dirname(dest), exist_ok=True)
with open(dest, "wb") as f:
f.write(manifest_data)
try:
self.manifest.Link(manifest_name)
except ManifestParseError as e:
logger.error("fatal: manifest '%s' not available", manifest_name)
logger.error("fatal: %s", e)
return False
if not this_manifest_only:
# Recurse into every submanifest with the same options.
for submanifest in self.manifest.submanifests.values():
spec = submanifest.ToSubmanifestSpec()
submanifest.repo_client.manifestProject.Sync(
manifest_url=spec.manifestUrl,
manifest_branch=spec.revision,
standalone_manifest=standalone_manifest,
groups=self.manifest_groups,
platform=platform,
mirror=mirror,
dissociate=dissociate,
reference=reference,
worktree=worktree,
submodules=submodules,
archive=archive,
partial_clone=partial_clone,
clone_filter=clone_filter,
partial_clone_exclude=partial_clone_exclude,
clone_bundle=clone_bundle,
git_lfs=git_lfs,
use_superproject=use_superproject,
verbose=verbose,
current_branch_only=current_branch_only,
tags=tags,
depth=depth,
git_event_log=git_event_log,
manifest_name=spec.manifestName,
this_manifest_only=False,
outer_manifest=False,
)
# Lastly, if the manifest has a <superproject> then have the
# superproject sync it (if it will be used).
if git_superproject.UseSuperproject(use_superproject, self.manifest):
sync_result = self.manifest.superproject.Sync(git_event_log)
if not sync_result.success:
submanifest = ""
if self.manifest.path_prefix:
submanifest = f"for {self.manifest.path_prefix} "
logger.warning(
"warning: git update of superproject %s failed, "
"repo sync will not use superproject to fetch source; "
"while this error is not fatal, and you can continue to "
"run repo sync, please run repo init with the "
"--no-use-superproject option to stop seeing this warning",
submanifest,
)
if sync_result.fatal and use_superproject is not None:
return False
return True
def ConfigureCloneFilterForDepth(self, clone_filter_for_depth):
    """Configure clone filter to replace shallow clones.

    Args:
        clone_filter_for_depth: a string or None, e.g. 'blob:none' will
            disable shallow clones and replace with partial clone. None
            will enable shallow clones.
    """
    self.config.SetString("repo.clonefilterfordepth", clone_filter_for_depth)
def _ConfigureDepth(self, depth):
"""Configure the depth we'll sync down.
Args:
depth: an int, how deep of a partial clone to create.
"""
# Opt.depth will be non-None if user actually passed --depth to repo
# init.
if depth is not None:
if depth > 0:
# Positive values will set the depth.
depth = str(depth)
else:
# Negative numbers will clear the depth; passing None to
# SetString will do that.
depth = None
# We store the depth in the main manifest project.
self.config.SetString("repo.depth", depth)
| GerritCodeReview/git-repo | project.py | project.py | py | 160,523 | python | en | code | 267 | github-code | 36 | [
{
"api_name": "repo_logging.RepoLogger",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "typing.NamedTuple",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "error.RepoError",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "error.RepoE... |
40174507966 | # 访问链接
import requests
# 进度条
from tqdm import tqdm
# 定义一个类来构造方法
class Music:
    """Interactive Kuwo music downloader.

    Prompts the user for a song name, searches Kuwo's web API, takes
    the first result, resolves its playable MP3 URL and streams it to
    disk with a tqdm progress bar.
    """

    # Seconds before any single HTTP request is aborted.  The original
    # code had no timeout, so a dead server would hang forever.
    _TIMEOUT = 15

    def __init__(self):
        """Prompt for a song name and start the search/download chain."""
        a = input('请输入想要下载的歌曲名称:')
        search_url = f'http://www.kuwo.cn/api/www/search/searchMusicBykeyWord?key={a}&pn=1&rn=20&httpsStatus=1&reqId=83069ca0-f2e3-11ed-b9e6-3d0d95dbf491'
        self.__url_json(search_url)

    def __url_json(self, url):
        """GET the search endpoint and pass the decoded JSON onwards.

        Kuwo requires the cookie/referer/csrf headers below or the API
        rejects the request.
        """
        headers = {
            'Cookie': '_ga=GA1.2.1065701725.1684117448; _gid=GA1.2.207714512.1684117448; _gat=1; Hm_lvt_cdb524f42f0ce19b169a8071123a4797=1684117448,1684118602; Hm_lpvt_cdb524f42f0ce19b169a8071123a4797=1684118602; kw_token=IZHLKUXE23M',
            'Referer': 'https://www.kuwo.cn/search/list?key=%E5%91%A8%E6%B7%B1',
            'csrf': 'IZHLKUXE23M',
            'Host': 'www.kuwo.cn',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35'
        }
        res = requests.get(url=url, headers=headers, timeout=self._TIMEOUT).json()
        self.__url_list(res)

    def __url_list(self, res):
        """Take only the first song in the result list and hand its
        metadata plus the play-URL endpoint to the download stage."""
        lists = res['data']['list']
        for i in lists:
            rid = i['rid']
            artist = i['artist']
            name = i['name']
            info = '{} {}'.format(artist, name)
            mp3_url = f'https://www.kuwo.cn/api/v1/www/music/playUrl?mid={rid}&type=convert_url3&br=320kmp3'
            print('歌曲的访问链接为:{}'.format(mp3_url))
            print('歌曲是:{}'.format(info))
            self.__url_bytes(info, mp3_url)
            # Only the first (best-match) result is downloaded.
            break

    def __url_bytes(self, info, mp3_url):
        """Resolve the real download URL and open it as a streaming
        response so progress can be shown while writing."""
        res = requests.get(mp3_url, timeout=self._TIMEOUT).json()
        url_json = res['data']['url']
        print('歌曲的下载链接为:{}'.format(url_json))
        res_url = requests.get(url_json, stream=True, timeout=self._TIMEOUT)
        self.__mp3_open(info, res_url)

    def __mp3_open(self, info, res_url):
        """Stream the MP3 to '<artist> <name>.mp3' in 1 KiB chunks,
        driving a tqdm progress bar sized from Content-Length."""
        conte_size = int(res_url.headers['content-length']) / 1024
        with open('{}.mp3'.format(info), 'wb') as f:
            for chunk in tqdm(iterable=res_url.iter_content(1024),
                              total=conte_size,
                              unit='kb',
                              desc='下载中...'):
                f.write(chunk)
        # Bug fix: the original printed '下行完成!' (a typo, "down-line
        # complete"); the intended message is "download complete".
        print('下载完成!')
# 创建一个对象,让它来进行音乐的下载,就是这么简单
music = Music() | UIGNB123/kuwo | 面向对象式酷我音乐爬虫,引入init方法和私有方法.py | 面向对象式酷我音乐爬虫,引入init方法和私有方法.py | py | 3,171 | python | zh | code | 1 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number":... |
31772755575 | # coding=utf-8
import os
import logging
from bs4 import UnicodeDammit
from subliminal.api import io, defaultdict
from subliminal_patch.patch_provider_pool import PatchedProviderPool
logger = logging.getLogger(__name__)
def download_subtitles(subtitles, **kwargs):
    """Fetch :attr:`~subliminal.subtitle.Subtitle.content` for each subtitle.

    Any extra keyword arguments are forwarded to the
    :class:`ProviderPool` constructor.

    :param subtitles: subtitles to download.
    :type subtitles: list of :class:`~subliminal.subtitle.Subtitle`
    """
    with PatchedProviderPool(**kwargs) as pool:
        for sub in subtitles:
            logger.info('Downloading subtitle %r', sub)
            pool.download_subtitle(sub)
def list_all_subtitles(videos, languages, **kwargs):
    """List every available subtitle for each video.

    The `videos` must pass the `languages` check of :func:`check_video`;
    all other parameters are passed onwards to the :class:`ProviderPool`
    constructor.

    :param videos: videos to list subtitles for.
    :type videos: set of :class:`~subliminal.video.Video`
    :param languages: languages to search for.
    :type languages: set of :class:`~babelfish.language.Language`
    :return: found subtitles per video.
    :rtype: dict of :class:`~subliminal.video.Video` to list of :class:`~subliminal.subtitle.Subtitle`
    """
    found = defaultdict(list)
    # Nothing to query -- skip provider setup entirely.
    if not videos:
        return found
    with PatchedProviderPool(**kwargs) as pool:
        for video in videos:
            logger.info('Listing subtitles for %r', video)
            # Only ask providers for languages the video does not have yet.
            missing = languages - video.subtitle_languages
            results = pool.list_subtitles(video, missing)
            found[video].extend(results)
            logger.info('Found %d subtitle(s)', len(results))
    return found
def get_subtitle_path(video_path, language=None, extension='.srt', forced_tag=False):
    """Derive the subtitle path from `video_path` and `language`.

    The video's extension is replaced by optional ``.<language>`` and
    ``.forced`` tags followed by `extension`.

    :param str video_path: path to the video.
    :param language: language of the subtitle to put in the path.
    :type language: :class:`~babelfish.language.Language`
    :param str extension: extension of the subtitle.
    :param bool forced_tag: insert a ``.forced`` tag before the extension.
    :return: path of the subtitle.
    :rtype: str
    """
    root, _ = os.path.splitext(video_path)
    parts = [root]
    if language:
        parts.append(str(language))
    if forced_tag:
        parts.append('forced')
    return '.'.join(parts) + extension
def save_subtitles(video, subtitles, single=False, directory=None, encoding=None, encode_with=None, chmod=None,
                   forced_tag=False, path_decoder=None):
    """Save subtitles on filesystem.

    Subtitles are saved in the order of the list. If a subtitle with a language has already been saved, other subtitles
    with the same language are silently ignored.

    The extension used is `.lang.srt` by default or `.srt` is `single` is `True`, with `lang` being the IETF code for
    the :attr:`~subliminal.subtitle.Subtitle.language` of the subtitle.

    :param video: video of the subtitles.
    :type video: :class:`~subliminal.video.Video`
    :param subtitles: subtitles to save.
    :type subtitles: list of :class:`~subliminal.subtitle.Subtitle`
    :param bool single: save a single subtitle, default is to save one subtitle per language.
    :param str directory: path to directory where to save the subtitles, default is next to the video.
    :param str encoding: encoding in which to save the subtitles, default is to keep original encoding.
    :param encode_with: optional callable applied to ``subtitle.text`` to produce the bytes to write.
    :param chmod: optional file mode applied to each written file via :func:`os.chmod`.
    :param bool forced_tag: insert a ``.forced`` tag into the subtitle filename.
    :param path_decoder: optional callable applied to the computed path before saving.
    :return: the saved subtitles
    :rtype: list of :class:`~subliminal.subtitle.Subtitle`

    patch: unicode path problems
    """
    saved_subtitles = []
    for subtitle in subtitles:
        # check content
        if subtitle.content is None:
            logger.error('Skipping subtitle %r: no content', subtitle)
            continue
        # check language: skip if a subtitle in this language was already saved
        if subtitle.language in set(s.language for s in saved_subtitles):
            logger.debug('Skipping subtitle %r: language already saved', subtitle)
            continue
        # create subtitle path (language tag omitted in single mode)
        subtitle_path = get_subtitle_path(video.name, None if single else subtitle.language, forced_tag=forced_tag)
        if directory is not None:
            subtitle_path = os.path.join(directory, os.path.split(subtitle_path)[1])
        if path_decoder:
            subtitle_path = path_decoder(subtitle_path)
        # force unicode (the "unicode path problems" patch mentioned above)
        subtitle_path = UnicodeDammit(subtitle_path).unicode_markup
        subtitle.storage_path = subtitle_path
        # save content as is or in the specified encoding
        logger.info('Saving %r to %r', subtitle, subtitle_path)
        has_encoder = callable(encode_with)
        if has_encoder:
            logger.info('Using encoder %s' % encode_with.__name__)
        # save normalized subtitle if encoder or no encoding is given
        if has_encoder or encoding is None:
            content = encode_with(subtitle.text) if has_encoder else subtitle.content
            with io.open(subtitle_path, 'wb') as f:
                f.write(content)
            # change chmod if requested
            if chmod:
                os.chmod(subtitle_path, chmod)
            if single:
                break
            # NOTE(review): this branch never appends to saved_subtitles, so
            # subtitles written here are absent from the return value and from
            # the per-language dedup check above -- confirm this is intentional.
            continue
        # save subtitle if encoding given
        if encoding is not None:
            with io.open(subtitle_path, 'w', encoding=encoding) as f:
                f.write(subtitle.text)
            # change chmod if requested
            if chmod:
                os.chmod(subtitle_path, chmod)
        saved_subtitles.append(subtitle)
        # check single
        if single:
            break
    return saved_subtitles
| luboslavgerliczy/SubZero | Contents/Libraries/Shared/subliminal_patch/patch_api.py | patch_api.py | py | 5,965 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "subliminal_patch.patch_provider_pool.PatchedProviderPool",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "subliminal.api.defaultdict",
"line_number": 41,
"usage_type": "... |
69815035624 | import os
from pyspark.sql import DataFrame
from pyspark.sql import types as t, functions as f
from pyspark.sql import SparkSession
from consts import COVID_DATE_FORMAT
def get_dataframe(name: str, session: SparkSession,
                  cols: list[str], type_mapping: dict,
                  date_format: str = COVID_DATE_FORMAT) -> DataFrame:
    """Read ``input_data/<name>`` as a CSV with an explicit schema.

    The schema is built from `cols`, resolving each column's Spark type
    through `type_mapping`; the file is read with a header row and the
    given `date_format` for date parsing.
    """
    csv_path = os.path.join(os.getcwd(), 'input_data', name)
    fields = []
    for column in cols:
        fields.append(t.StructField(column, type_mapping[column]))
    schema = t.StructType(fields)
    return session.read.csv(csv_path, schema=schema, header=True, dateFormat=date_format)
def _fill_from_group_stat(df: DataFrame, target_col: str, column: str,
                          agg_expr, suffix: str) -> DataFrame:
    # Replace nulls in `column` with a per-`target_col` group statistic
    # computed by `agg_expr` over the non-null rows of that group.
    stat_col = f'{column}_{suffix}'
    stats_df = (df
                .filter(f.col(column).isNotNull())
                .groupBy(target_col)
                .agg(agg_expr.alias(stat_col)))
    joined_df = df.join(stats_df, on=target_col, how='left')
    return (joined_df
            .withColumn(column,
                        f.when(joined_df[column].isNull(),
                               joined_df[stat_col])
                        .otherwise(joined_df[column]))
            .drop(stat_col))


def fill_numeric_manual(df: DataFrame,
                        target_col: str,
                        median_cols: list[str],
                        mean_cols: list[str]) -> DataFrame:
    """Fill nulls in numeric columns using per-group statistics.

    For every column in `median_cols`, nulls are replaced by the group
    median (``percentile_approx`` at 0.5) computed per `target_col` value;
    for every column in `mean_cols`, nulls are replaced by the group mean.

    :param df: input DataFrame.
    :param target_col: column whose values define the groups.
    :param median_cols: columns to impute with the group median.
    :param mean_cols: columns to impute with the group mean.
    :return: DataFrame with nulls filled in the listed columns.
    """
    for median_column in median_cols:
        df = _fill_from_group_stat(df, target_col, median_column,
                                   f.percentile_approx(median_column, 0.5),
                                   'median')
    for mean_column in mean_cols:
        # BUG FIX: the mean branch previously reused percentile_approx(0.5)
        # (a median) while labelling it "_mean"; it now computes a true mean.
        df = _fill_from_group_stat(df, target_col, mean_column,
                                   f.mean(mean_column),
                                   'mean')
    return df
def split_dataframe(dataframe: DataFrame, thresholds: list) -> list[DataFrame]:
    """Shuffle `dataframe` and split it randomly by the `thresholds` weights.

    :param dataframe: DataFrame to split.
    :param thresholds: relative weights for :meth:`DataFrame.randomSplit`.
    :return: list of DataFrames, one per weight.

    NOTE(review): ``randomSplit`` on top of a non-deterministic
    ``orderBy(rand())`` shuffle may not reproduce identical splits across
    re-evaluations even with ``seed=42`` -- confirm reproducibility is not
    required here.
    """
    dataframe = dataframe.orderBy(f.rand())
    return dataframe.randomSplit(thresholds, seed=42)
| volodymyrkir/pyspark_ml | utils.py | utils.py | py | 2,410 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "consts.COVID_DATE_FORMAT",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.pat... |
import spacy

# Demo: word, token and sentence similarity with spaCy's medium English model.
nlp = spacy.load('en_core_web_md')

# --- similarity between three single-word docs -----------------------------
cat_doc = nlp("cat")
monkey_doc = nlp("monkey")
banana_doc = nlp("banana")
for left, right in ((cat_doc, monkey_doc), (banana_doc, monkey_doc), (banana_doc, cat_doc)):
    print(left.similarity(right))

# --- full pairwise similarity matrix over four tokens ----------------------
tokens = nlp('cat apple monkey banana')
for first in tokens:
    for second in tokens:
        print(first.text, second.text, first.similarity(second))

# --- rank candidate sentences against a reference sentence -----------------
reference = nlp("Why is my cat on the car")
candidates = [
    "where did my dog go",
    "Hello, there is my car",
    "I've lost my car in my car",
    "I'd like my boat back",
    "I will name my dog Diana",
    "animal on car"]
for candidate in candidates:
    score = nlp(candidate).similarity(reference)
    print(f"{candidate} - {score}")

# Observation: the model compares meanings, not surface strings. "monkey" and
# "ape" score ~1.0 because both denote animals -- the vectors encode semantic
# categories, not character overlap.
{
"api_name": "spacy.load",
"line_number": 2,
"usage_type": "call"
}
] |
17190663769 | import PySimpleGUI as sg
class GameGui:
    """PySimpleGUI front end for a Japanese (hiragana) crossword puzzle.

    Renders an 8x8 board on a Graph element, a clue panel, one input button
    per hiragana character, and control buttons.
    """

    def __init__(self,
                 box_size=15,
                 title='Japanese Crossword Puzzle!',
                 puzzle_size=500,
                 coor_sys_height=130
                 ):
        """Build the window layout and create the finalized window.

        :param box_size: side length of one board tile, in Graph coordinates.
        :param title: text shown in the header row.
        :param puzzle_size: pixel size of the square Graph element.
        :param coor_sys_height: extent of the Graph's logical coordinate system.
        """
        self.box_size = box_size
        self.rows = 8
        self.cols = 8
        sg.theme('SystemDefaultForReal')
        layout = [
            [sg.Text(title), sg.Text('Press "Generate Puzzle" to start game', key='-Status-')],
            [sg.Graph((puzzle_size, puzzle_size),
                      (0, coor_sys_height),
                      (coor_sys_height, 0),
                      key='-PuzzleBoard-',
                      change_submits=True, drag_submits=False),
             # Placeholder text sized to reserve room for the clue list.
             sg.Text(('\n' + ' ' * 60) * 24, key='-Clues-')
             ],
        ]
        # Generate one input button per hiragana character (U+3041..U+3096),
        # laid out in rows of 22 buttons.
        i = 0
        for row in range(4):
            input_buttons = []
            for col in range(22):
                # every button is associated with a single character for event detection
                input_buttons.append(sg.Button('{}'.format(chr(0x3041 + i))))
                i += 1
                if 0x3041 + i > 0x3096:
                    break  # stop past the last hiragana code point
            layout.append(input_buttons)
        layout.append([sg.Button('Generate Puzzle'), sg.Button('Show Answer'), sg.Button('Exit')])
        self.window = sg.Window('Window Title', layout, finalize=True)

    def get_puzzle_interface(self):
        """Return the Graph element the board is drawn on."""
        return self.window['-PuzzleBoard-']

    def monitor_events(self):
        """Block until the next window event; return the (event, values) pair."""
        return self.window.read()

    def display_puzzle(self, horizontal_words=None, vertical_words=None, show_answer=False):
        """Draw the puzzle: black background, white word tiles, labels, clues.

        Each word entry is a sequence of
        ``(start_row, start_col, word_string, clue)``.

        :param horizontal_words: words laid out left-to-right.
        :param vertical_words: words laid out top-to-bottom.
        :param bool show_answer: also render each tile's answer character.
        """
        # Avoid mutable default arguments; treat None as "no words".
        horizontal_words = horizontal_words if horizontal_words is not None else []
        vertical_words = vertical_words if vertical_words is not None else []
        # Paint the whole board black first; word tiles are whitened below.
        for row in range(self.rows):
            for col in range(self.cols):
                self.render_rectangle(row, col, fill_color='black')
        # Whiten (and optionally fill in) the horizontal words.
        for hori_word in horizontal_words:
            start_row, start_col, word = hori_word[0], hori_word[1], hori_word[2]
            for i in range(start_col, start_col + len(word)):
                self.render_rectangle(start_row, i, fill_color='white')
                if show_answer:
                    self.render_tile_char(start_row, i, word[i - start_col], 'blue')
        # Whiten (and optionally fill in) the vertical words.
        # BUG FIX: the clue used to be unpacked from hori_word[3] here (a
        # copy-paste slip), which raised NameError when horizontal_words was
        # empty; the clue is not needed in this loop at all.
        for vert_word in vertical_words:
            start_row, start_col, word = vert_word[0], vert_word[1], vert_word[2]
            for i in range(start_row, start_row + len(word)):
                self.render_rectangle(i, start_col, fill_color='white')
                if show_answer:
                    self.render_tile_char(i, start_col, word[i - start_row], 'blue')
        # render tile labels and clues for vertical and horizontal words
        self.render_tile_labels(horizontal_words, vertical_words)
        self.render_clues(horizontal_words, vertical_words)
        self.window['-Status-'].update("Generated Puzzle")
        if show_answer:
            self.window['-Status-'].update("Showing Answer")

    def render_rectangle(self, row, col, fill_color='black', line_color='black'):
        """Draw one board tile as a filled rectangle at (row, col)."""
        box_size = self.box_size
        puzzle_board = self.get_puzzle_interface()
        puzzle_board.draw_rectangle((col * box_size, row * box_size),
                                    ((col + 1) * box_size, (row + 1) * box_size),
                                    line_color=line_color, fill_color=fill_color)

    def render_black_rectangles(self, words_matrix):
        """Re-blacken every tile that holds no word character."""
        for row in range(len(words_matrix)):
            for col in range(len(words_matrix[row])):
                if not (words_matrix[row][col]):
                    self.render_rectangle(row, col, fill_color='black')

    def render_selected_tile(self, row, col, fill_color='green'):
        """Mark a tile as selected: a small triangle in its top-right corner."""
        box_size = self.box_size
        puzzle_board = self.get_puzzle_interface()
        points = [((col + .9) * box_size, (row + .1) * box_size),
                  ((col + .99) * box_size, (row + .1) * box_size),
                  ((col + .99) * box_size, (row + .2) * box_size)
                  ]
        puzzle_board.DrawPolygon(points, fill_color=fill_color)

    def clean_previous_selected_tiles(self, words_matrix):
        """Erase all selection triangles, then restore the black background."""
        # Overdraw every possible triangle in white, then re-blacken the
        # tiles that hold no word.
        for row in range(self.rows):
            for col in range(self.cols):
                self.render_selected_tile(row, col, fill_color='white')
        self.render_black_rectangles(words_matrix)

    def render_tile_char(self, row, col, tile_char, color='black'):
        """Clear a tile to white and draw a single character centered on it."""
        box_size = self.box_size
        puzzle_board = self.get_puzzle_interface()
        self.render_rectangle(row, col, fill_color='white')
        letter_location = (col * box_size + box_size * .7,
                           row * box_size + box_size * .7)
        puzzle_board.draw_text(tile_char, letter_location, font='Courier 25', color=color)

    def render_tile_label(self, row, col, tile_label, align='horizontal'):
        """Draw a small word label (e.g. 'h0' / 'v3') inside the word's first tile."""
        box_size = self.box_size
        puzzle_board = self.get_puzzle_interface()
        if align == 'horizontal':
            puzzle_board.draw_text(tile_label, (col * box_size + box_size * .2, row * box_size + box_size * .4))
        else:  # for vertical words
            puzzle_board.draw_text(tile_label, (col * box_size + box_size * .4, row * box_size + box_size * .2))

    def render_tile_labels(self, horizontal_words, vertical_words):
        """Label every word's starting tile with 'h<i>' or 'v<i>'."""
        for counter, hori_word in enumerate(horizontal_words):
            self.render_tile_label(hori_word[0], hori_word[1],
                                   tile_label='h' + str(counter), align='horizontal')
        for counter, vert_word in enumerate(vertical_words):
            self.render_tile_label(vert_word[0], vert_word[1],
                                   tile_label='v' + str(counter), align='vertical')

    def render_clues(self, horizontal_words, vertical_words):
        """Build the numbered clue list and show it in the '-Clues-' text area."""
        clues = 'Clues:\n'
        for counter, hori_word in enumerate(horizontal_words):
            clues = clues + 'h' + str(counter) + ') ' + hori_word[3] + '\n'
        for counter, vert_word in enumerate(vertical_words):
            clues = clues + 'v' + str(counter) + ') ' + vert_word[3] + '\n'
        print(clues)
        self.window['-Clues-'].update(clues)
| chengcj-upenn/jp_crossword_puzzle | frontend.py | frontend.py | py | 7,549 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PySimpleGUI.theme",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.Text",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.Graph",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.Text... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.