query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
The WAV file is loaded and transformed into a Fourier series, which is then limited in frequency.
def wav_to_fourier(wav_file, rate_limit=6000.0, step=1.0):
    """
    Load a WAV file and turn it into a truncated, normalized Fourier spectrum.

    :param wav_file: path to the WAV file to analyze.
    :param rate_limit: highest frequency (Hz) kept in the spectrum.
    :param step: bucket width (Hz) passed to ``group_by_freq``.
    :return: tuple ``(freq, fourier)`` with the maximum amplitude scaled to 100.
    """
    sample_rate, samples = read(wav_file)
    # Collapse multi-channel audio down to mono by averaging channels.
    if len(samples) != len(samples.ravel()):
        samples = np.mean(samples, axis=1)
    # Zero-pad the signal up to the next power of two for the FFT.
    n_samples = samples.shape[0]
    padded = np.zeros(2 ** (int(np.ceil(np.log2(n_samples)))))
    padded[0:n_samples] = samples
    # Magnitude spectrum and its frequency axis.
    spectrum = np.abs(np.fft.fft(padded))
    freq = np.linspace(0, sample_rate, spectrum.shape[0])
    # Keep only frequencies up to rate_limit, then bucket by `step` Hz.
    freq, spectrum = limit_by_freq(freq, spectrum, upper_limit=rate_limit)
    freq, spectrum = group_by_freq(freq, spectrum, step=step)
    # Normalize so the largest magnitude is exactly 100.0.
    scale = np.max(np.abs(spectrum)) / 100.0
    spectrum = spectrum / scale
    return freq, spectrum
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_wav(file_name):\n fs, signal = wavfile.read(file_name)\n signal = np.float32(signal) / (2**(16)/2-1)\n return fs, signal", "def analyzeWAV(inputFile):\n data, fs, nbits = audiolab.wavread(inputFile)\n samplingRate = fs\n return [data, samplingRate]", "def inputwav(filename):\n dat...
[ "0.6820377", "0.6551853", "0.65371555", "0.649495", "0.6372757", "0.6339702", "0.63110816", "0.6309993", "0.63011295", "0.6290294", "0.62898475", "0.62364256", "0.62239075", "0.62043417", "0.61933094", "0.61776847", "0.61596906", "0.6157661", "0.6120229", "0.60863763", "0.606...
0.6188984
15
Limit arrays of frequencies and features to a band between a bottom frequency and a maximum frequency.
def limit_by_freq(freq, features, upper_limit, lower_limit=None):
    """
    Limit arrays of frequencies and features to a frequency band.

    :param freq: sequence of frequencies.
    :param features: sequence of feature values aligned with ``freq``.
    :param upper_limit: maximum frequency kept (inclusive).
    :param lower_limit: optional minimum frequency kept (inclusive).
    :return: tuple ``(freq, features)`` of filtered float arrays.
    """
    # Copy into float arrays, in order to apply masks to any sequence input.
    # (The `np.float` alias was removed in NumPy 1.24; the builtin `float`
    # is the documented replacement and yields the same float64 dtype.)
    freq = np.array(freq, dtype=float)
    features = np.array(features, dtype=float)
    # Mask for bottom limit
    if lower_limit is not None:
        bottom_mask = freq >= lower_limit
        features = features[bottom_mask]
        freq = freq[bottom_mask]
    # Mask for upper limit
    upper_mask = freq <= upper_limit
    features = features[upper_mask]
    freq = freq[upper_mask]
    return freq, features
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findMaximal(freqSet):", "def __restrict_features_freq(self, min_count=1):\n col_idx = self.X.tocsc().nonzero()[1]\n counter = np.bincount(col_idx)\n print(\"Counter:\", len(counter))\n include_cols = np.where(counter > min_count)[0]\n return include_cols", "def fit_featur...
[ "0.6321469", "0.5905518", "0.5899971", "0.5845656", "0.5827781", "0.5711425", "0.55240804", "0.5505938", "0.54641736", "0.5451525", "0.5440867", "0.5408901", "0.5367295", "0.53634137", "0.5352045", "0.5324692", "0.53187746", "0.53122324", "0.5303038", "0.52815557", "0.5275447...
0.72728205
0
Check if song is already transformed into temp.
def check_wav(song, source_folder, temp_folder, encoder='mpg123'):
    """
    Check if a song is already transformed into a WAV file in the temp
    folder, converting it from MP3 when it is not.

    :param song: file name of the song, including extension.
    :param source_folder: folder containing the original file.
    :param temp_folder: folder where the WAV version is stored.
    :param encoder: encoder name forwarded to ``mp3_to_wav``.
    """
    # Name of files
    song_name, extension = os.path.splitext(song)
    mp3_file = os.path.join(source_folder, song)
    if '.wav' != extension:
        wav_file = os.path.join(temp_folder, song_name + '.wav')
        try:
            # Only convert when the WAV copy does not exist yet.
            if not os.path.isfile(wav_file):
                mp3_to_wav(
                    mp3_file=mp3_file,
                    wav_file=wav_file,
                    encoder=encoder)
            else:
                pass
        except MemoryError:
            logger.error('MemoryError: %s MP3 couldn\'t be transformed into WAV', song_name)
    else:
        # Already a wav file: just copy it into the temp folder.
        # NOTE(review): the destination uses song_name without the '.wav'
        # extension, so the copy is written extension-less — confirm this
        # matches what downstream readers expect.
        copyfile(mp3_file, os.path.join(temp_folder, song_name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isTemp(self,object):\n return (object in self.tempObjects)", "def test_transform_track_album_based_on_album_title_no_match_album(self):\n track = Track(artist='Artist', album='Album 3', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album...
[ "0.56140214", "0.54819727", "0.542196", "0.5414857", "0.5397536", "0.5393968", "0.5390524", "0.53878826", "0.53857505", "0.5385733", "0.5346858", "0.5340154", "0.53382695", "0.5315191", "0.5310992", "0.53011787", "0.52986276", "0.5296976", "0.5272015", "0.5269272", "0.5257539...
0.5619942
0
Return a dictionary with memory information.
def get_mem_info():
    """
    Return a dictionary with memory information parsed from ``/proc/meminfo``.

    Each key is a meminfo field name (colon stripped); each value is a
    ``MemInfoEntry`` namedtuple holding the raw string ``value`` and a
    (possibly empty) list ``unit`` with the remaining tokens.
    """
    MemInfoEntry = namedtuple('MemInfoEntry', ['value', 'unit'])
    mem_info = {}
    with open('/proc/meminfo') as file:
        for line in file:
            fields = line.strip().split()
            name, amount, unit = fields[0], fields[1], fields[2:]
            mem_info[name.rstrip(':')] = MemInfoEntry(amount, unit)
    return mem_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MemoryInfo(cls):\n\t\tres = {}\n\t\tfor line in cat(\"/proc/meminfo\").split(\"\\n\")[:-1]:\n\t\t\tline = RE_SPACES.sub(\" \", line).strip().split(\" \")\n\t\t\tname, value = line[:2]\n\t\t\tres[name.replace(\"(\", \"_\").replace(\")\", \"_\").replace(\":\", \"\")] = int(value)\n\t\treturn res", "def get_mem...
[ "0.83256704", "0.8320725", "0.8266024", "0.8175442", "0.8164575", "0.800875", "0.79796195", "0.7837153", "0.778341", "0.77577746", "0.7723628", "0.76374567", "0.7615092", "0.75794065", "0.75041574", "0.7460927", "0.7370262", "0.7370262", "0.7370262", "0.7370262", "0.7370262",...
0.79375446
7
Return the free memory in gigabytes.
def get_free_gb():
    """Return the available RAM (``MemAvailable``, reported in kB) in gigabytes."""
    available_kb = float(get_mem_info()['MemAvailable'].value)
    return available_kb / 10**6
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cgts_vg_free_space():\n\n try:\n # Determine space in cgts-vg in GiB\n vg_free_str = subprocess.check_output( # pylint: disable=not-callable\n ['vgdisplay', '-C', '--noheadings', '--nosuffix',\n '-o', 'vg_free', '--units', 'g', 'cgts-vg'],\n close_fds=Tru...
[ "0.7192137", "0.6909616", "0.690619", "0.68786997", "0.6813705", "0.67277247", "0.66669893", "0.6577275", "0.6504887", "0.65012854", "0.64604944", "0.6451814", "0.6396979", "0.63525146", "0.63032967", "0.6267119", "0.6259522", "0.62487036", "0.6246622", "0.62347066", "0.62068...
0.75012624
0
True if it can't run, False otherwise. Condition is GB of RAM memory available.
def ram_condition(min_gb=3):
    """
    Return True when the process should not run, i.e. when the available
    RAM is below ``min_gb`` gigabytes.
    """
    free_gb = get_free_gb()
    return free_gb < min_gb
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_if_sufficient_memory():\n percent_memory = psutil.virtual_memory().percent\n if percent_memory > 75:\n raise ValueError('Please use a device with more CPU ram or a smaller dataset')", "def memory_check(self) -> bool:\n available_bytes = psutil.virtual_memory().available\...
[ "0.7269116", "0.68143415", "0.66817755", "0.64842796", "0.6442835", "0.6359018", "0.6243431", "0.6243431", "0.62134373", "0.61970484", "0.58983666", "0.58968914", "0.5888447", "0.5873215", "0.58724576", "0.57852805", "0.5777731", "0.57357985", "0.57344395", "0.5731109", "0.56...
0.6852558
1
True if it can't run, False otherwise. Condition is a proportion of RAM memory available.
def ram_prop_condition(prop=0.25):
    """
    Return True when the process should not run, i.e. when available RAM
    falls below ``prop`` of the machine's total RAM.
    """
    total_gb = float(get_mem_info()['MemTotal'].value) / 10**6
    # The threshold is expressed as a fraction of total memory.
    return ram_condition(min_gb=prop * total_gb)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_if_sufficient_memory():\n percent_memory = psutil.virtual_memory().percent\n if percent_memory > 75:\n raise ValueError('Please use a device with more CPU ram or a smaller dataset')", "def memory_check(self) -> bool:\n available_bytes = psutil.virtual_memory().available\...
[ "0.7295557", "0.6686275", "0.6624224", "0.63968927", "0.62839794", "0.623324", "0.6183667", "0.61706424", "0.6126043", "0.6126043", "0.5933436", "0.5923849", "0.5857149", "0.58338654", "0.58319116", "0.5819135", "0.5796401", "0.5792657", "0.5691452", "0.5668905", "0.5658324",...
0.60934323
10
Transform a MP3 song into WAV format, and then into Fourier series.
def time_to_frequency(song, temp_folder, output_folder, rate_limit=6000.0, overwrite=True, plot=True, image_folder=None, step=5.0):
    """
    Transform a song's WAV file into a Fourier series and save it as JSON.

    :param song: file name of the song (extension included).
    :param temp_folder: folder holding the WAV version of the song.
    :param output_folder: folder where the JSON result is written.
    :param rate_limit: highest frequency kept, forwarded to ``wav_to_fourier``.
    :param overwrite: when True, recompute even if the JSON already exists.
    :param plot: when True, also draw the spectrum with ``fourier_plot``.
    :param image_folder: folder for the plot image (used when ``plot``).
    :param step: frequency bucket width, forwarded to ``wav_to_fourier``.
    """
    # Name of files
    song_name = os.path.splitext(song)[0]
    json_name = os.path.join(output_folder, song_name + '.json')
    wav_file = os.path.join(temp_folder, song_name + '.wav')
    if not os.path.isfile(json_name) or overwrite is True:
        # Fourier transformation
        try:
            # If less than 10% of RAM is available, busy-wait until at
            # least 20% is free before starting the transform.
            if ram_prop_condition(prop=0.1):
                logger.error('Song %s is waiting until more memory is available', song_name)
                while ram_prop_condition(prop=0.2):
                    pass  # It consumes cpu, but we assure it doesn't go to sleep indefinitely
            frequencies, fourier_series = wav_to_fourier(wav_file=wav_file,
                                                         rate_limit=rate_limit,
                                                         step=step)
            # Save as JSON: {song: {frequency (str): amplitude}}
            json_to_save = {song: {str(x): y for x, y in zip(frequencies, fourier_series)}}
            with open(json_name, 'w') as output:
                json.dump(json_to_save, output)
            # Plotting
            if plot is True:
                fourier_plot(freq=frequencies,
                             features=fourier_series,
                             folder=image_folder,
                             filename=song_name)
            logger.debug('%s transformed', song_name)
        except MemoryError:
            logger.error('MemoryError: %s couldn\'t be Fourier transformed', song_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_wav_to_mp3(self):\n for each_file, artist in self.past_songs_db_data:\n self.convert_wav_to_mp3(each_file)", "def mp3_to_wav(show_progress=True):\n\n # Define a devnull var to supress subprocess output\n devnull = open(os.devnull, 'w')\n\n # Get a list of the filepath for each ...
[ "0.6867949", "0.6767562", "0.6552796", "0.64654547", "0.62336755", "0.6104877", "0.6079277", "0.6008433", "0.59922224", "0.5926576", "0.589333", "0.58891535", "0.58476907", "0.5833807", "0.5785292", "0.56830513", "0.5678468", "0.5659172", "0.56562704", "0.5653985", "0.5649063...
0.5470141
29
Transform a directory full of MP3 files into WAV files, and then into Fourier series, working with directories.
def transform_folder(source_folder, output_folder, temp_folder, rate_limit=6000.0, overwrite=True, plot=False, image_folder=None, multiprocess=False, encoder='mpg123', step=5.0):
    """
    Transform a directory full of MP3 files into WAV files, and then into
    Fourier series saved as JSON, one file per song.

    :param source_folder: folder with the input songs.
    :param output_folder: folder where the JSON results are written.
    :param temp_folder: folder for the intermediate WAV files.
    :param rate_limit: highest frequency kept, forwarded to ``time_to_frequency``.
    :param overwrite: when True, recompute existing JSON files.
    :param plot: when True, also plot each spectrum into ``image_folder``.
    :param image_folder: folder for plot images (created when ``plot``).
    :param multiprocess: when True, process songs with a process pool.
    :param encoder: encoder forwarded to ``check_wav`` for MP3→WAV.
    :param step: frequency bucket width, forwarded to ``time_to_frequency``.
    """
    merged_file = os.path.join(output_folder, 'merged_file.json')
    os.makedirs(temp_folder, exist_ok=True)
    os.makedirs(output_folder, exist_ok=True)
    # A stale merged file from a previous run is removed up front.
    if os.path.isfile(merged_file):
        os.remove(merged_file)
    if plot:
        os.makedirs(image_folder, exist_ok=True)
    # Check if mp3 is already transformed into wav. Right
    # now, foucluster doesn't have a direct read from mp3
    logger.info('Checking if songs are in WAV format...')
    if source_folder != temp_folder:
        [check_wav(song=song,
                   source_folder=source_folder,
                   temp_folder=temp_folder,
                   encoder=encoder)
         for song in os.listdir(source_folder)]
    if multiprocess is True:
        logger.debug('Fourier is applied in multiprocess')
        songs = [(song, temp_folder, output_folder, rate_limit,
                  overwrite, plot, image_folder, step)
                 for song in os.listdir(source_folder)]
        # maxtasksperchild=1 recycles workers after each song, freeing memory.
        # with mp.Pool(processes=max(int(mp.cpu_count() / 2.0), 1)) as p:
        with mp.Pool(processes=mp.cpu_count(), maxtasksperchild=1) as p:
            p.starmap(time_to_frequency, songs)
    else:
        logger.debug('Fourier is applied in single core')
        [time_to_frequency(song=song,
                           temp_folder=temp_folder,
                           output_folder=output_folder,
                           rate_limit=rate_limit,
                           overwrite=overwrite,
                           plot=plot,
                           image_folder=image_folder,
                           step=step)
         for song in os.listdir(source_folder)]
    # read_files = glob.glob(os.path.join(output_folder, '*.json'))
    # with open(merged_file, 'w') as outfile:
    #     file_contents = [open(f).read() for f in read_files]
    #     outfile.write('[{}]'.format(','.join(file_contents)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_wav_to_mp3(self):\n for each_file, artist in self.past_songs_db_data:\n self.convert_wav_to_mp3(each_file)", "def mp3_to_wav(show_progress=True):\n\n # Define a devnull var to supress subprocess output\n devnull = open(os.devnull, 'w')\n\n # Get a list of the filepath for each ...
[ "0.7115496", "0.6985259", "0.67233366", "0.6686839", "0.65235364", "0.64989144", "0.63002807", "0.6277914", "0.6166281", "0.61311907", "0.61195964", "0.606834", "0.6029333", "0.60196835", "0.6009603", "0.5915913", "0.5896318", "0.5880936", "0.58191925", "0.5811102", "0.581042...
0.6515147
5
Initialize the SingleImage object
def __init__(self, camera):
    """
    Initialize the SingleImage object.

    :param camera: the camera that took the image.
    """
    self.__camera = camera
    self.__isSolved = False
    self.__innerOrientationParameters = None
    self.__rotationMatrix = None
    # Six exterior orientation parameters, stored as float32 zeros.
    self.__exteriorOrientationParameters = np.array([0, 0, 0, 0, 0, 0], 'f')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, image):\n self.image = image", "def __init__(self, img):\n self.img = img", "def initImages(self):\n pass", "def initImages(self):\n pass", "def initImages(self):\n pass", "def __init__(self, image: np.ndarray) -> None:\n self.image = image", ...
[ "0.7476075", "0.72321916", "0.70613927", "0.70613927", "0.70613927", "0.70282173", "0.7005531", "0.7002043", "0.7002043", "0.7002043", "0.7002043", "0.7002043", "0.6960504", "0.69188726", "0.6878795", "0.67705446", "0.6648267", "0.6634296", "0.6630021", "0.6602309", "0.654855...
0.0
-1
The camera that took the image
def camera(self):
    """The camera that took the image."""
    cam = self.__camera
    return cam
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def camera(self):\n return self._camera", "def camera(self):\n return self._camera", "def query_camera(self):\n ok, orig_pic = self.vs.read() # Read video stream\n if ok: # If no errors\n orig_pic = imutils.rotate(orig_pic, angle=self.camera_rot)\n curr_pic =...
[ "0.8029401", "0.8029401", "0.7391439", "0.7336508", "0.7180209", "0.711195", "0.711195", "0.71046424", "0.70813036", "0.70528156", "0.69937193", "0.69860065", "0.69809467", "0.68731916", "0.6841381", "0.6825675", "0.68190277", "0.680215", "0.6788457", "0.6687622", "0.66858554...
0.81577206
0
r""" Property for the exterior orientation parameters
def exteriorOrientationParameters(self):
    r"""Property for the exterior orientation parameters."""
    params = self.__exteriorOrientationParameters
    return params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def orient(self):\n return self.__ph.get('orient', PH_ORIENT_HORZ)", "def get_orientation(self):\r\n return self.__orientation", "def Orientation(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_Orientation(self, *args)", "def orientation(self):\n return self._orientation", ...
[ "0.61678284", "0.61665916", "0.61640745", "0.60078", "0.60078", "0.5891339", "0.5885623", "0.58542037", "0.5838997", "0.5836636", "0.5822891", "0.58147955", "0.5795377", "0.57695895", "0.57024115", "0.56947505", "0.56763643", "0.5612986", "0.5593346", "0.5587369", "0.5578711"...
0.80023855
0
The rotation matrix of the image Relates to the exterior orientation
def rotationMatrix(self):
    """
    The rotation matrix of the image, built from the three angular
    exterior orientation parameters (indices 3, 4, 5).
    """
    pars = self.exteriorOrientationParameters
    return Compute3DRotationMatrix(pars[3], pars[4], pars[5])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rotation_angle(self, image):\n \n # TODO: Make real functionality\n return 0", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def matrix(self):\n return self._rotation", "def rotation(...
[ "0.7759922", "0.77279866", "0.7530842", "0.7502491", "0.7206852", "0.71904266", "0.7162689", "0.71563435", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7005358", "0.6981667", ...
0.77768993
0
The rotation matrix of the image Relates to the exterior orientation
def rotationMatrix_RzRyRz(self):
    """
    The rotation matrix of the image in the Rz-Ry-Rz convention, built
    from the three angular exterior orientation parameters (indices 3-5).
    """
    pars = self.exteriorOrientationParameters
    return Compute3DRotationMatrix_RzRyRz(pars[3], pars[4], pars[5])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotationMatrix(self):\n\n R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def get_rotation_angle(self, image):\n \n # TODO: Make re...
[ "0.77768993", "0.7759922", "0.77279866", "0.7530842", "0.7502491", "0.7206852", "0.71904266", "0.7162689", "0.71563435", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7005358",...
0.6058148
93
True if the exterior orientation is solved
def isSolved(self):
    """True if the exterior orientation has been solved."""
    solved = self.__isSolved
    return solved
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_versor(self) -> bool:\n return np.isclose(np.linalg.norm(self.A), 1.0)", "def is_hom_alt(self) -> bool:\n return self.is_hom() and (self.allele1 > 0 or self.allele2 > 0)", "def solved(self):\n return all(cell == 1 for row in self.faces for cell in row) or all(cell == 0 for row in se...
[ "0.645655", "0.63614464", "0.62675583", "0.62624866", "0.62122506", "0.6185279", "0.6101881", "0.60985523", "0.605189", "0.6042637", "0.6027129", "0.6021935", "0.6013572", "0.6010895", "0.59998566", "0.5995052", "0.5970132", "0.5969842", "0.5943013", "0.59369075", "0.59358853...
0.0
-1
r""" Compute inner orientation parameters
def ComputeInnerOrientation(self, imagePoints):
    r"""
    Compute inner orientation parameters by a linear least-squares
    adjustment between measured image points and the camera fiducial marks.

    :param imagePoints: measured image coordinates; reshaped internally to
        a single observation column vector (x1, y1, x2, y2, ...).
    :return: dict with ``params`` (the 6 estimated parameters),
        ``residuals`` (v = A·X - l) and ``N`` (the normal matrix A^T·A).
        Also stores the parameters on the instance as a side effect.
    """
    # implementing observation vectors
    imagePoints = imagePoints.reshape(np.size(imagePoints), 1)
    fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)
    n = int(len(imagePoints))  # number of observations
    u = 6  # 6 orientation parameters
    A = np.zeros((n, u))  # A matrix (n,u)
    # Rows alternate: even rows model x-equations, odd rows y-equations,
    # each pair consuming one fiducial (x, y) pair from fMarks.
    j = 0
    for i in range(len(imagePoints)):
        if i % 2 == 0:
            A[i, 0] = 1; A[i, 1] = 0; A[i, 2] = fMarks[j]; A[i, 3] = fMarks[j + 1]; A[i, 4] = 0
            A[i, 5] = 0
        else:
            A[i, 0] = 0; A[i, 1] = 1; A[i, 2] = 0; A[i, 3] = 0; A[i, 4] = fMarks[j]; A[i, 5] = fMarks[j + 1]
            j += 2
    # Normal-equations solution X = (A^T A)^-1 A^T l.
    X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))
    v = np.dot(A, X) - imagePoints
    adjustment_results = {"params": X, "residuals": v, "N": np.dot(np.transpose(A), A)}
    self.__innerOrientationParameters = X  # updating the inner orientation params
    return adjustment_results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrie...
[ "0.687762", "0.6436624", "0.58278096", "0.578275", "0.57705504", "0.5755263", "0.5649262", "0.5623161", "0.5620293", "0.5616361", "0.55939484", "0.55838054", "0.5497484", "0.54887134", "0.54881567", "0.54828835", "0.54536176", "0.5438182", "0.5438182", "0.54221433", "0.541117...
0.7024464
0
Computes the geometric inner orientation parameters
def ComputeGeometricParameters(self):
    """
    Compute the geometric inner orientation parameters (translation,
    rotation, scale and shear) from the six algebraic affine parameters.

    :return: dict with ``translationX``/``translationY``, ``rotationAngle``
        and ``shearAngle`` in degrees, and ``scaleFactorX``/``scaleFactorY``.
    """
    # extracting inner orientation params
    a0 = self.innerOrientationParameters[0]
    b0 = self.innerOrientationParameters[1]
    a1 = self.innerOrientationParameters[2]
    a2 = self.innerOrientationParameters[3]
    b1 = self.innerOrientationParameters[4]
    b2 = self.innerOrientationParameters[5]
    # computing algebric params
    tx = a0; ty = b0
    # Decompose the affine 2x2 part into rotation, shear and scale.
    theta = np.arctan(b1 / b2)
    gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))
    sx = a1 * np.cos(theta) - a2 * np.sin(theta)
    sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)
    return {"translationX": tx, "translationY": ty, "rotationAngle": np.rad2deg(theta),
            "scaleFactorX": sx, "scaleFactorY": sy, "shearAngle": np.rad2deg(gamma)}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def magnitude_orientation(gx, gy):\n \n magnitude = np.sqrt(gx**2 + gy**2)\n orientation = (np.arctan2(gy, gx) * 180 / np.pi) % 180\n \n return magnitude, orientation", "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self....
[ "0.6358747", "0.6289955", "0.626587", "0.6029137", "0.5908361", "0.5868645", "0.5784191", "0.57483953", "0.5706465", "0.5689375", "0.5624956", "0.5610132", "0.5609869", "0.560806", "0.5597778", "0.5587196", "0.5573857", "0.5553297", "0.55513024", "0.55361223", "0.5525677", ...
0.740137
0
Computes the parameters of the inverse inner orientation transformation
def ComputeInverseInnerOrientation(self):
    """
    Compute the parameters of the inverse inner orientation transformation.

    :return: flat array ``[a0, b0, m00, m01, m10, m11]`` where ``m`` is the
        inverse of the 2x2 affine part of the inner orientation.
    """
    pars = self.innerOrientationParameters
    # The stored parameters are 1-element arrays; unwrap with [0].
    a0, b0 = pars[0], pars[1]
    a1, a2, b1, b2 = pars[2], pars[3], pars[4], pars[5]
    forward = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])
    inverse = la.inv(forward)
    return np.array([a0[0], b0[0],
                     inverse[0, 0], inverse[0, 1],
                     inverse[1, 0], inverse[1, 1]]).T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse(self):\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.uni...
[ "0.63808596", "0.6343662", "0.63060564", "0.6252362", "0.61781716", "0.61693305", "0.61612785", "0.61262125", "0.61208826", "0.6098615", "0.609339", "0.60388464", "0.59626293", "0.59568155", "0.5946826", "0.5945765", "0.59148675", "0.5901364", "0.5894338", "0.5879606", "0.586...
0.7558201
0
Transforms camera points to image points
def CameraToImage(self, cameraPoints):
    """
    Transform camera points to image points via the inner orientation.

    :param cameraPoints: array of camera points, one (x, y) point per row.
    :return: array of image points, one point per row.
    """
    pars = self.innerOrientationParameters
    a0, b0 = pars[0], pars[1]
    a1, a2, b1, b2 = pars[2], pars[3], pars[4], pars[5]
    # Parameters may be plain scalars or 1-element arrays, depending on
    # how the inner orientation was computed.
    if np.isscalar(a0):
        R = np.array([[a1, a2], [b1, b2]])
        T = np.array([[a0], [b0]])
    else:
        R = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])
        T = np.array([[a0[0]], [b0[0]]])
    # Affine mapping into the image system: x_img = T + R * x_cam.
    return (T + np.dot(R, cameraPoints.T)).T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def imageFromCamera(self, points, hide_backpoints=True):\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.ar...
[ "0.75200397", "0.7221418", "0.72199947", "0.71426326", "0.71409965", "0.7138931", "0.7127518", "0.7100937", "0.70733917", "0.6988374", "0.69729286", "0.69044685", "0.6830623", "0.6829222", "0.6822007", "0.6778279", "0.67171186", "0.6710748", "0.66177326", "0.661222", "0.65576...
0.73480624
1
Transforms image points to ideal camera points
def ImageToCamera(self, imagePoints):
    """
    Transform image points to ideal camera points using the inverse
    inner orientation.

    :param imagePoints: array of image points, one (x, y) point per row.
    :return: array of camera points, one point per row.
    """
    inv_pars = self.ComputeInverseInnerOrientation()
    pts = imagePoints.T
    # A single point may arrive flat; force it into a (2, 1) column.
    if pts.size == 2:
        pts = np.reshape(np.array(pts), (np.size(pts), 1))
    T = np.array([[inv_pars[0]], [inv_pars[1]]])
    R = np.array([[inv_pars[2], inv_pars[3]],
                  [inv_pars[4], inv_pars[5]]])
    # Inverse affine mapping: x_cam = R * (x_img - T).
    return (np.dot(R, pts - T)).T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def camera_transform(image):\n img = np.zeros((image.shape[0], image.shape[1], 3))\n for y in range(image.shape[0]):\n for x in range(image.shape[1]):\n img[y][x] = (x - 320) / 575.5 * image[y, x], (240 - y) / 575.5 * image[y, x], image[\n y, x]\n return img", "def image...
[ "0.74925107", "0.72041154", "0.7066651", "0.7043059", "0.7004196", "0.69641924", "0.69523805", "0.6926867", "0.6857847", "0.6825331", "0.67971975", "0.6750294", "0.6734582", "0.663383", "0.65459144", "0.65373534", "0.65190524", "0.65055305", "0.6471489", "0.6462659", "0.64615...
0.70939386
2
Compute exterior orientation parameters. This function can be used in conjunction with ``self.__ComputeDesignMatrix(groundPoints)`` and ``self.__ComputeObservationVector(imagePoints)``
def ComputeExteriorOrientation(self, imagePoints, groundPoints, epsilon):
    """
    Compute exterior orientation parameters by iterative least-squares,
    using ``self.__ComputeDesignMatrix(groundPoints)`` and
    ``self.__ComputeObservationVector(groundPoints)``.

    :param imagePoints: measured points, used directly as camera points.
    :param groundPoints: corresponding ground coordinates, one per row.
    :param epsilon: convergence threshold on the norm of the update vector.
    :return: list ``[exteriorOrientationParameters, sigmaX, v]`` where
        ``sigmaX`` is the covariance of the estimate (None when the system
        has zero redundancy) and ``v`` are the final residuals.
    """
    # cameraPoints = self.ImageToCamera(imagePoints)
    cameraPoints = imagePoints
    # Seed the iteration with approximate values.
    self.__ComputeApproximateVals(cameraPoints, groundPoints)
    l0 = self.__ComputeObservationVector(groundPoints.T)
    l0 = np.reshape(l0, (-1, 1))
    l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0
    A = self.__ComputeDesignMatrix(groundPoints.T)
    # Normal equations: deltaX = (A^T A)^-1 A^T l.
    N = np.dot(A.T, A)
    u = np.dot(A.T, l)
    deltaX = np.dot(la.inv(N), u)
    # update orientation pars
    self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))
    # Iterate until the correction is smaller than epsilon.
    while la.norm(deltaX) > epsilon:
        l0 = self.__ComputeObservationVector(groundPoints.T)
        l0 = np.reshape(l0, (-1, 1))
        l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0
        A = self.__ComputeDesignMatrix(groundPoints.T)
        N = np.dot(A.T, A)
        u = np.dot(A.T, l)
        deltaX = np.dot(la.inv(N), u)
        # update orientation pars
        self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))
    # compute residuals
    l_a = np.reshape(self.__ComputeObservationVector(groundPoints.T), (-1, 1))
    v = l_a - cameraPoints.reshape(np.size(cameraPoints), 1)
    # Covariance only exists when there is redundancy (n > u).
    if (np.size(A, 0) - np.size(deltaX)) != 0:
        sig = np.dot(v.T, v) / (np.size(A, 0) - np.size(deltaX))
        sigmaX = sig[0] * la.inv(N)
    else:
        sigmaX = None
    return [self.exteriorOrientationParameters, sigmaX, v]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exteriorOrientationParameters(self):\n return self.__exteriorOrientationParameters", "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(n...
[ "0.71254003", "0.68282026", "0.6420376", "0.61255026", "0.61024475", "0.5787343", "0.57387304", "0.5717728", "0.5698031", "0.567763", "0.5626359", "0.55450577", "0.5525408", "0.5507461", "0.54807997", "0.54278624", "0.5404268", "0.53900427", "0.5351375", "0.5299402", "0.52918...
0.69500554
1
Compute exterior orientation parameters. This function can be used in conjunction with ``self.__ComputeDesignMatrix(groundPoints)`` and ``self.__ComputeObservationVector(imagePoints)``
def ComputeExteriorOrientation_RzRyRz(self, imagePoints, groundPoints, epsilon):
    """
    Compute exterior orientation parameters in the Rz-Ry-Rz convention by
    iterative least-squares, using the ``*_RzRyRz`` design-matrix and
    observation-vector helpers.

    :param imagePoints: measured points, used directly as camera points.
    :param groundPoints: corresponding ground coordinates, one per row.
    :param epsilon: convergence threshold on the norm of the update vector.
    :return: list ``[exteriorOrientationParameters, sigmaX, v]`` where
        ``sigmaX`` is the covariance of the estimate (None when the system
        has zero redundancy) and ``v`` are the final residuals.
    """
    # cameraPoints = self.ImageToCamera(imagePoints)
    cameraPoints = imagePoints
    # Rotate the position seed into the RzRyRz frame and jitter the
    # parameters slightly to avoid a degenerate starting point.
    self.exteriorOrientationParameters[0:3] = np.dot(self.rotationMatrix_RzRyRz, self.exteriorOrientationParameters[0:3])
    self.exteriorOrientationParameters = np.add(self.exteriorOrientationParameters, np.random.normal(0, 0.01, self.exteriorOrientationParameters.shape))
    l0 = self.__ComputeObservationVector_RzRyRz(groundPoints.T)
    l0 = np.reshape(l0, (-1, 1))
    l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0
    A = self.__ComputeDesignMatrix_RzRyRz(groundPoints.T)
    # Normal equations: deltaX = (A^T A)^-1 A^T l.
    N = np.dot(A.T, A)
    u = np.dot(A.T, l)
    deltaX = np.dot(la.inv(N), u)
    # update orientation pars
    self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))
    # Iterate until the correction is smaller than epsilon.
    while la.norm(deltaX) > epsilon:
        l0 = self.__ComputeObservationVector_RzRyRz(groundPoints.T)
        l0 = np.reshape(l0, (-1, 1))
        l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0
        A = self.__ComputeDesignMatrix_RzRyRz(groundPoints.T)
        N = np.dot(A.T, A)
        u = np.dot(A.T, l)
        deltaX = np.dot(la.inv(N), u)
        # update orientation pars
        self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))
    # compute residuals
    l_a = np.reshape(self.__ComputeObservationVector_RzRyRz(groundPoints.T), (-1, 1))
    v = l_a - cameraPoints.reshape(np.size(cameraPoints), 1)
    # Covariance only exists when there is redundancy (n > u).
    if (np.size(A, 0) - np.size(deltaX)) != 0:
        sig = np.dot(v.T, v) / (np.size(A, 0) - np.size(deltaX))
        sigmaX = sig[0] * la.inv(N)
    else:
        sigmaX = None
    return [self.exteriorOrientationParameters, sigmaX, v]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exteriorOrientationParameters(self):\n return self.__exteriorOrientationParameters", "def ComputeExteriorOrientation(self, imagePoints, groundPoints, epsilon):\n # cameraPoints = self.ImageToCamera(imagePoints)\n cameraPoints = imagePoints\n self.__ComputeApproximateVals(cameraPoi...
[ "0.71254003", "0.69500554", "0.68282026", "0.61255026", "0.61024475", "0.5787343", "0.57387304", "0.5717728", "0.5698031", "0.567763", "0.5626359", "0.55450577", "0.5525408", "0.5507461", "0.54807997", "0.54278624", "0.5404268", "0.53900427", "0.5351375", "0.5299402", "0.5291...
0.6420376
3
Transforming ground points to image points
def GroundToImage(self, groundPoints):
    """
    Transform ground points to image points via the collinearity equations.

    :param groundPoints: (n, 3) array of ground coordinates.
    :return: (n, 2) array of image coordinates.
    """
    X0 = float(self.exteriorOrientationParameters[0])
    Y0 = float(self.exteriorOrientationParameters[1])
    Z0 = float(self.exteriorOrientationParameters[2])
    xp = float(self.camera.principalPoint[0])
    yp = float(self.camera.principalPoint[1])
    R = self.rotationMatrix
    f = self.camera.focalLength
    camPoints = []
    for i in range(groundPoints.shape[0]):
        # Offsets of the ground point from the projection center.
        dX = groundPoints[i, 0] - X0
        dY = groundPoints[i, 1] - Y0
        dZ = groundPoints[i, 2] - Z0
        # Shared denominator of both collinearity equations.
        w = float(R[0, 2]) * dX + float(R[1, 2]) * dY + float(R[2, 2]) * dZ
        x = xp - (f) * ((float(R[0, 0]) * dX + float(R[1, 0]) * dY + float(R[2, 0]) * dZ) / w)
        y = yp - (f) * ((float(R[0, 1]) * dX + float(R[1, 1]) * dY + float(R[2, 1]) * dZ) / w)
        camPoints.append([x, y])
    # return self.CameraToImage(np.array(camPoints))
    return (np.array(camPoints))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GroundToImage_RzRyRz(self, groundPoints):\n X0 = float(self.exteriorOrientationParameters[0])\n Y0 = float(self.exteriorOrientationParameters[1])\n Z0 = float(self.exteriorOrientationParameters[2])\n\n xp = float(self.camera.principalPoint[0])\n yp = float(self.camera.princip...
[ "0.647679", "0.6467655", "0.6214912", "0.61901563", "0.61635965", "0.60568637", "0.6051804", "0.6028781", "0.60097945", "0.5999861", "0.5963049", "0.59306324", "0.5910674", "0.5894894", "0.5861491", "0.5838941", "0.58371437", "0.5833094", "0.58130443", "0.58091205", "0.580821...
0.7286386
0
Transforming ground points to image points
def GroundToImage_RzRyRz(self, groundPoints):
    """
    Transform ground points to image points via the collinearity
    equations, using the Rz-Ry-Rz rotation matrix.

    :param groundPoints: (n, 3) array of ground coordinates.
    :return: (n, 2) array of image coordinates.
    """
    X0 = float(self.exteriorOrientationParameters[0])
    Y0 = float(self.exteriorOrientationParameters[1])
    Z0 = float(self.exteriorOrientationParameters[2])
    xp = float(self.camera.principalPoint[0])
    yp = float(self.camera.principalPoint[1])
    R = self.rotationMatrix_RzRyRz
    f = self.camera.focalLength
    camPoints = []
    for i in range(groundPoints.shape[0]):
        # Offsets of the ground point from the projection center.
        dX = groundPoints[i, 0] - X0
        dY = groundPoints[i, 1] - Y0
        dZ = groundPoints[i, 2] - Z0
        # Shared denominator of both collinearity equations.
        w = float(R[0, 2]) * dX + float(R[1, 2]) * dY + float(R[2, 2]) * dZ
        x = xp - (f) * ((float(R[0, 0]) * dX + float(R[1, 0]) * dY + float(R[2, 0]) * dZ) / w)
        y = yp - (f) * ((float(R[0, 1]) * dX + float(R[1, 1]) * dY + float(R[2, 1]) * dZ) / w)
        camPoints.append([x, y])
    # return self.CameraToImage(np.array(camPoints))
    return (np.array(camPoints))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GroundToImage(self, groundPoints):\n X0 = float(self.exteriorOrientationParameters[0])\n Y0 = float(self.exteriorOrientationParameters[1])\n Z0 = float(self.exteriorOrientationParameters[2])\n\n xp = float(self.camera.principalPoint[0])\n yp = float(self.camera.principalPoint...
[ "0.7286386", "0.6467655", "0.6214912", "0.61901563", "0.61635965", "0.60568637", "0.6051804", "0.6028781", "0.60097945", "0.5999861", "0.5963049", "0.59306324", "0.5910674", "0.5894894", "0.5861491", "0.5838941", "0.58371437", "0.5833094", "0.58130443", "0.58091205", "0.58082...
0.647679
1
Transforms Image point to a Ray in world system
def ImageToRay(self, imagePoints): pass # delete after implementations
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pointToWorld( nImageX, nImageY, rDepth, rMaxX = 320, rMaxY = 240, rFieldOfViewX = 60, rFieldOfViewY = 40 ):\n # convert to [-0.5,0.5]\n rCenteredX = ( nImageX / rMaxX ) - 0.5;\n rCenteredY = ( nImageY / rMaxY ) - 0.5;", "def project_to_image_plane(self, point_in_world, timestamp):\n\n camera_...
[ "0.69458926", "0.6836335", "0.633762", "0.62048507", "0.6087586", "0.6003151", "0.5976462", "0.5972177", "0.5914467", "0.58434325", "0.58393484", "0.58110195", "0.57184476", "0.5707809", "0.570757", "0.5687363", "0.5659299", "0.56507516", "0.5646074", "0.5632133", "0.5627521"...
0.7174655
0
Compute corresponding ground point given the height in world system
def ImageToGround_GivenZ(self, imagePoints, Z_values): cameraPoints = self.ImageToCamera(imagePoints) cameraPoints = cameraPoints.T pars = self.exteriorOrientationParameters X0 = pars[0] Y0 = pars[1] Z0 = pars[2] T = np.array([[X0], [Y0], [Z0]]) omega = pars[3] phi = pars[4] kappa = pars[5] R = Compute3DRotationMatrix(omega, phi, kappa) f = self.camera.focalLength # allocating memory for return array groundPoints = [] for i in range(len(cameraPoints[1])): camVec = np.insert(cameraPoints[:, i], np.size(cameraPoints), -f) lam = (Z_values - Z0) / (np.dot(R[2, :], camVec)) X = X0 + lam * np.dot(R[0, :], camVec) Y = Y0 + lam * np.dot(R[1, :], camVec) xy = [X, Y, Z_values] groundPoints.append(xy) groundPoints = np.array(groundPoints) return groundPoints
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def template_height(self, x, y):\n\n tx = float(x) / self.world_size\n tx = tx * self.global_template.size\n\n ty = float(y) / self.world_size\n ty = ty * self.global_template.size\n\n tx1 = int(tx)\n dx = tx - tx1\n tx2 = tx1 + 1\n\n ty1 = int(ty)\n d...
[ "0.6243619", "0.5950408", "0.5930064", "0.5907398", "0.5722841", "0.5635606", "0.56119394", "0.55470324", "0.5499355", "0.5497724", "0.5488828", "0.547526", "0.5454269", "0.5429254", "0.5421403", "0.5410944", "0.5403357", "0.53891635", "0.5388756", "0.5382602", "0.5380469", ...
0.0
-1
calculates area of the footprint on the ground focalLength and sensorsize in mm
def castSize(self, scale): return self.camera.sensorSize * scale
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def area(self):\n return (self.baselength1 + self.baselength2)*self.height/2", "def surfaceArea(self):\n surfaceArea = self.sideLength**2 * 6\n return surfaceArea", "def area(self):\n return self.length*self.length", "def calculateDetectorArea(self):\n area = 0.0\n r = s...
[ "0.68501455", "0.6728362", "0.67006356", "0.66854936", "0.6640243", "0.6634641", "0.66335106", "0.6622647", "0.6602097", "0.6584502", "0.65486264", "0.6531883", "0.65018195", "0.6479644", "0.6479644", "0.6473053", "0.6473053", "0.6473053", "0.646595", "0.646595", "0.646595", ...
0.0
-1
Generating grid of points biased by ppa (principal point delta)
def GeneratePointsImg(self, n, ppa): x = np.linspace(0,self.camera.sensorSize,n)+ppa[0] y = np.linspace(0,self.camera.sensorSize,n)+ppa[1] return np.meshgrid(x, y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_pt(pt=None,):\n global dim\n mod_rand_pt = []\n\n for i_ in range(dim):\n for j_ in range(i_, dim):\n mod_rand_pt.append(pt[i_] * pt[j_])\n\n mod_rand_pt.append(1.)\n return mod_rand_pt", "def projection_P(P_prime):\n sorted_prime = -np.sort(-P_prime, axis=1) # Des...
[ "0.60559386", "0.59712946", "0.59704006", "0.595379", "0.5917291", "0.58962584", "0.58279955", "0.582432", "0.5792295", "0.57677454", "0.5709391", "0.5696328", "0.56858194", "0.56748235", "0.5633881", "0.5615407", "0.56149656", "0.560475", "0.560267", "0.5601162", "0.5589663"...
0.6255561
0
Calculates ray intersection point with plane
def LinePlaneCollision(planeNormal, planePoint, rayDirection, rayPoint, epsilon=1e-12): ndotu = planeNormal.dot(rayDirection) if abs(ndotu) < epsilon: raise RuntimeError("no intersection or line is within plane") w = rayPoint - planePoint si = -planeNormal.dot(w) / ndotu Psi = w + si * rayDirection + planePoint return Psi
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def intersects(self, ray):\n theta = 45\n H = 512\n W = 512\n A = self.origin\n B = Point(W, A.y, A.z)\n C = Point(B.x, (int)(H * math.sin(theta * math.pi / 180)), (int)(H * math.cos(math.pi * theta / 180)))\n D = Point(A.x, (int)(H * math.sin(theta * math.pi / 180)...
[ "0.75932515", "0.7466053", "0.7187562", "0.7109652", "0.71037644", "0.70618355", "0.7053688", "0.70142746", "0.6986311", "0.6911706", "0.691073", "0.68320477", "0.6821069", "0.6723013", "0.6722326", "0.6714333", "0.66570723", "0.6611767", "0.6607392", "0.6602498", "0.6568937"...
0.69176984
9
Compute exterior orientation approximate values via 2D conform transformation
def __ComputeApproximateVals(self, cameraPoints, groundPoints): # Find approximate values cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1) groundPointsXY = groundPoints[0:2, :].T groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1) groundPointsZ = groundPoints[2, :].T n = int(len(cameraPoints)) # number of observations u = 4 # 4 conform parameters A = np.zeros((n, u)) # A matrix (n,u) j = 0 for i in range(len(cameraPoints)): if i % 2 == 0: A[i, 0] = 1 A[i, 1] = 0 A[i, 2] = cameraPoints[j] A[i, 3] = cameraPoints[j + 1] else: A[i, 0] = 0 A[i, 1] = 1 A[i, 2] = cameraPoints[j + 1] A[i, 3] = -cameraPoints[j] j += 2 X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY)) # now we can compute the rest of the params X0 = X[0] Y0 = X[1] kappa = np.arctan2(-X[3], X[2]) lam = np.sqrt(X[2] ** 2 + X[3] ** 2) Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength adjustment_results = {"X0": X0[0], "Y0": Y0[0], "Z0": Z0[0], "omega": 0, "phi": 0, "kappa": np.rad2deg(kappa[0])} self.__exteriorOrientationParameters = np.array( [X0[0], Y0[0], Z0[0], 0, 0, kappa[0]]).T # updating the exterior orientation params # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T #return adjustment_results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observation...
[ "0.6454401", "0.63721627", "0.63071674", "0.6261199", "0.6196916", "0.6190799", "0.61566526", "0.60925615", "0.60682034", "0.6002589", "0.59904027", "0.5930793", "0.5907026", "0.5904838", "0.5902323", "0.58778685", "0.58416617", "0.58258176", "0.58110034", "0.5757958", "0.574...
0.0
-1
Compute exterior orientation approximate values via 2D conform transformation
def __ComputeApproximateVals_RzRyRz(self, cameraPoints, groundPoints): # Find approximate values cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1) groundPointsXY = groundPoints[0:2, :].T groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1) groundPointsZ = groundPoints[2, :].T n = int(len(cameraPoints)) # number of observations u = 4 # 4 conform parameters A = np.zeros((n, u)) # A matrix (n,u) j = 0 for i in range(len(cameraPoints)): if i % 2 == 0: A[i, 0] = 1 A[i, 1] = 0 A[i, 2] = cameraPoints[j] A[i, 3] = cameraPoints[j + 1] else: A[i, 0] = 0 A[i, 1] = 1 A[i, 2] = cameraPoints[j + 1] A[i, 3] = -cameraPoints[j] j += 2 X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY)) # now we can compute the rest of the params X0 = X[0] Y0 = X[1] kappa = np.arctan2(-X[3], X[2]) lam = np.sqrt(X[2] ** 2 + X[3] ** 2) Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength adjustment_results = {"X0": X0[0], "Y0": Y0[0], "Z0": Z0[0], "omega": 0, "phi": 0, "kappa": np.rad2deg(kappa[0])} self.__exteriorOrientationParameters = np.array( [X0[0], Y0[0], Z0[0], 0.2, 0.2, kappa[0]]).T # updating the exterior orientation params # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T #return adjustment_results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observation...
[ "0.6454401", "0.63721627", "0.63071674", "0.6261199", "0.6196916", "0.6190799", "0.61566526", "0.60925615", "0.60682034", "0.6002589", "0.59904027", "0.5930793", "0.5907026", "0.5904838", "0.5902323", "0.58778685", "0.58416617", "0.58258176", "0.58110034", "0.5757958", "0.574...
0.0
-1
Compute observation vector for solving the exterior orientation parameters of a single image based on their approximate values
def __ComputeObservationVector(self, groundPoints): n = groundPoints.shape[0] # number of points # Coordinates subtraction dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0] dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1] dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2] dXYZ = np.vstack([dX, dY, dZ]) rotated_XYZ = np.dot(self.rotationMatrix.T, dXYZ).T l0 = np.empty(n * 2) # Computation of the observation vector based on approximate exterior orientation parameters: l0[::2] = -self.camera.focalLength * rotated_XYZ[:, 0] / rotated_XYZ[:, 2] l0[1::2] = -self.camera.focalLength * rotated_XYZ[:, 1] / rotated_XYZ[:, 2] return l0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ev2vi(eV,mu): \n return cv*np.sqrt( eV*(eV+2.e0*mu*mpc2))/(eV+mu*mpc2)", "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.c...
[ "0.60023314", "0.5932807", "0.5920514", "0.5843368", "0.58112794", "0.57958204", "0.5776637", "0.573947", "0.5736349", "0.5707544", "0.5702388", "0.5691359", "0.5680941", "0.5660144", "0.5651713", "0.5644874", "0.56244063", "0.55701184", "0.55645734", "0.5539961", "0.5539685"...
0.56527215
14
Compute observation vector for solving the exterior orientation parameters of a single image based on their approximate values
def __ComputeObservationVector_RzRyRz(self, groundPoints): n = groundPoints.shape[0] # number of points # Coordinates subtraction dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0] dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1] dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2] dXYZ = np.vstack([dX, dY, dZ]) rotated_XYZ = np.dot(self.rotationMatrix_RzRyRz.T, dXYZ).T l0 = np.empty(n * 2) # Computation of the observation vector based on approximate exterior orientation parameters: l0[::2] = -self.camera.focalLength * rotated_XYZ[:, 0] / rotated_XYZ[:, 2] l0[1::2] = -self.camera.focalLength * rotated_XYZ[:, 1] / rotated_XYZ[:, 2] return l0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ev2vi(eV,mu): \n return cv*np.sqrt( eV*(eV+2.e0*mu*mpc2))/(eV+mu*mpc2)", "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.c...
[ "0.600269", "0.5934908", "0.5921452", "0.58419925", "0.58109325", "0.5797084", "0.5777378", "0.57385683", "0.57354975", "0.57078993", "0.5702315", "0.5689945", "0.5679907", "0.5660313", "0.5653539", "0.5650662", "0.56459534", "0.5623185", "0.55718786", "0.55647194", "0.553900...
0.0
-1
Compute the derivatives of the collinear law (design matrix)
def __ComputeDesignMatrix(self, groundPoints): # initialization for readability omega = self.exteriorOrientationParameters[3] phi = self.exteriorOrientationParameters[4] kappa = self.exteriorOrientationParameters[5] # Coordinates subtraction dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0] dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1] dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2] dXYZ = np.vstack([dX, dY, dZ]) rotationMatrixT = self.rotationMatrix.T rotatedG = rotationMatrixT.dot(dXYZ) rT1g = rotatedG[0, :] rT2g = rotatedG[1, :] rT3g = rotatedG[2, :] focalBySqauredRT3g = self.camera.focalLength / rT3g ** 2 dxdg = rotationMatrixT[0, :][None, :] * rT3g[:, None] - rT1g[:, None] * rotationMatrixT[2, :][None, :] dydg = rotationMatrixT[1, :][None, :] * rT3g[:, None] - rT2g[:, None] * rotationMatrixT[2, :][None, :] dgdX0 = np.array([-1, 0, 0], 'f') dgdY0 = np.array([0, -1, 0], 'f') dgdZ0 = np.array([0, 0, -1], 'f') # Derivatives with respect to X0 dxdX0 = -focalBySqauredRT3g * np.dot(dxdg, dgdX0) dydX0 = -focalBySqauredRT3g * np.dot(dydg, dgdX0) # Derivatives with respect to Y0 dxdY0 = -focalBySqauredRT3g * np.dot(dxdg, dgdY0) dydY0 = -focalBySqauredRT3g * np.dot(dydg, dgdY0) # Derivatives with respect to Z0 dxdZ0 = -focalBySqauredRT3g * np.dot(dxdg, dgdZ0) dydZ0 = -focalBySqauredRT3g * np.dot(dydg, dgdZ0) dRTdOmega = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'omega').T dRTdPhi = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'phi').T dRTdKappa = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'kappa').T gRT3g = dXYZ * rT3g # Derivatives with respect to Omega dxdOmega = -focalBySqauredRT3g * (dRTdOmega[0, :][None, :].dot(gRT3g) - rT1g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0] dydOmega = -focalBySqauredRT3g * (dRTdOmega[1, :][None, :].dot(gRT3g) - rT2g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0] # Derivatives with respect to Phi dxdPhi = -focalBySqauredRT3g * (dRTdPhi[0, :][None, :].dot(gRT3g) - rT1g 
* (dRTdPhi[2, :][None, :].dot(dXYZ)))[0] dydPhi = -focalBySqauredRT3g * (dRTdPhi[1, :][None, :].dot(gRT3g) - rT2g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0] # Derivatives with respect to Kappa dxdKappa = -focalBySqauredRT3g * (dRTdKappa[0, :][None, :].dot(gRT3g) - rT1g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0] dydKappa = -focalBySqauredRT3g * (dRTdKappa[1, :][None, :].dot(gRT3g) - rT2g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0] # all derivatives of x and y dd = np.array([np.vstack([dxdX0, dxdY0, dxdZ0, dxdOmega, dxdPhi, dxdKappa]).T, np.vstack([dydX0, dydY0, dydZ0, dydOmega, dydPhi, dydKappa]).T]) a = np.zeros((2 * dd[0].shape[0], 6)) a[0::2] = dd[0] a[1::2] = dd[1] return a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def derivatives(x_p, y_p):\r\n # set up the matrix equation\r\n n = x_p.shape[0]\r\n M = np.zeros( [n,n] )\r\n d = np.zeros( [n,1] )\r\n \r\n # fill in the constants where they can be\r\n for i in np.arange(1,n-1 ): # for all but the first and last row\r\n M[i,i-1 ] = ( x_p[i] - x_p[i-...
[ "0.6885692", "0.65035003", "0.62627643", "0.62573814", "0.6247173", "0.6219265", "0.6201201", "0.619879", "0.6149196", "0.6138502", "0.60837907", "0.6081997", "0.6081997", "0.6045166", "0.6043331", "0.6040035", "0.6020596", "0.6014159", "0.600143", "0.59920883", "0.59908885",...
0.0
-1
Compute the derivatives of the collinear law (design matrix)
def __ComputeDesignMatrix_RzRyRz(self, groundPoints): # initialization for readability azimuth = self.exteriorOrientationParameters[3] phi = self.exteriorOrientationParameters[4] kappa = self.exteriorOrientationParameters[5] # Coordinates subtraction dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0] dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1] dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2] dXYZ = np.vstack([dX, dY, dZ]) rotationMatrixT = self.rotationMatrix_RzRyRz.T rotatedG = rotationMatrixT.dot(dXYZ) rT1g = rotatedG[0, :] rT2g = rotatedG[1, :] rT3g = rotatedG[2, :] focalBySqauredRT3g = self.camera.focalLength / rT3g ** 2 dxdg = rotationMatrixT[0, :][None, :] * rT3g[:, None] - rT1g[:, None] * rotationMatrixT[2, :][None, :] dydg = rotationMatrixT[1, :][None, :] * rT3g[:, None] - rT2g[:, None] * rotationMatrixT[2, :][None, :] dgdX0 = np.array([-1, 0, 0], 'f') dgdY0 = np.array([0, -1, 0], 'f') dgdZ0 = np.array([0, 0, -1], 'f') # Derivatives with respect to X0 dxdX0 = -focalBySqauredRT3g * np.dot(dxdg, dgdX0) dydX0 = -focalBySqauredRT3g * np.dot(dydg, dgdX0) # Derivatives with respect to Y0 dxdY0 = -focalBySqauredRT3g * np.dot(dxdg, dgdY0) dydY0 = -focalBySqauredRT3g * np.dot(dydg, dgdY0) # Derivatives with respect to Z0 dxdZ0 = -focalBySqauredRT3g * np.dot(dxdg, dgdZ0) dydZ0 = -focalBySqauredRT3g * np.dot(dydg, dgdZ0) dRTdOmega = Compute3DRotationDerivativeMatrix_RzRyRz(azimuth, phi, kappa, 'azimuth').T dRTdPhi = Compute3DRotationDerivativeMatrix_RzRyRz(azimuth, phi, kappa, 'phi').T dRTdKappa = Compute3DRotationDerivativeMatrix_RzRyRz(azimuth, phi, kappa, 'kappa').T gRT3g = dXYZ * rT3g # Derivatives with respect to Omega dxdOmega = -focalBySqauredRT3g * (dRTdOmega[0, :][None, :].dot(gRT3g) - rT1g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0] dydOmega = -focalBySqauredRT3g * (dRTdOmega[1, :][None, :].dot(gRT3g) - rT2g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0] # Derivatives with respect to Phi dxdPhi = -focalBySqauredRT3g 
* (dRTdPhi[0, :][None, :].dot(gRT3g) - rT1g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0] dydPhi = -focalBySqauredRT3g * (dRTdPhi[1, :][None, :].dot(gRT3g) - rT2g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0] # Derivatives with respect to Kappa dxdKappa = -focalBySqauredRT3g * (dRTdKappa[0, :][None, :].dot(gRT3g) - rT1g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0] dydKappa = -focalBySqauredRT3g * (dRTdKappa[1, :][None, :].dot(gRT3g) - rT2g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0] # all derivatives of x and y dd = np.array([np.vstack([dxdX0, dxdY0, dxdZ0, dxdOmega, dxdPhi, dxdKappa]).T, np.vstack([dydX0, dydY0, dydZ0, dydOmega, dydPhi, dydKappa]).T]) a = np.zeros((2 * dd[0].shape[0], 6)) a[0::2] = dd[0] a[1::2] = dd[1] return a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def derivatives(x_p, y_p):\r\n # set up the matrix equation\r\n n = x_p.shape[0]\r\n M = np.zeros( [n,n] )\r\n d = np.zeros( [n,1] )\r\n \r\n # fill in the constants where they can be\r\n for i in np.arange(1,n-1 ): # for all but the first and last row\r\n M[i,i-1 ] = ( x_p[i] - x_p[i-...
[ "0.6885692", "0.65035003", "0.62627643", "0.62573814", "0.6247173", "0.6219265", "0.6201201", "0.619879", "0.6149196", "0.6138502", "0.60837907", "0.6081997", "0.6081997", "0.6045166", "0.6043331", "0.6040035", "0.6020596", "0.6014159", "0.600143", "0.59920883", "0.59908885",...
0.0
-1
Map label to name.
def label_to_name(label): return "Tree"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def label_to_name(self, label):\n\t\t\treturn self.labels[label]", "def label_to_name(self, label):\n\t\treturn self.labels[label]", "def label_to_name(self, label):\n return self.labels[label]", "def label_name(self) -> str:\n return pulumi.get(self, \"label_name\")", "def get_label_name(lab...
[ "0.8264281", "0.82229835", "0.8194375", "0.71729356", "0.70556575", "0.6895756", "0.6878515", "0.6878004", "0.68344384", "0.682798", "0.6825429", "0.6815113", "0.68029517", "0.6751829", "0.67418474", "0.6715094", "0.6705865", "0.6690326", "0.666572", "0.66346323", "0.65997976...
0.7184514
3
Predict invidiual tree crown bounding boxes for a single image
def predict_image(model, image_path, score_threshold = 0.1, max_detections= 200, return_plot=True): #predict raw_image = cv2.imread(image_path) image = preprocess(raw_image) image, scale = keras_retinanet_image.resize_image(image) if keras.backend.image_data_format() == 'channels_first': image = image.transpose((2, 0, 1)) # run network boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))[:3] # correct boxes for image scale boxes /= scale # select indices which have a score above the threshold indices = np.where(scores[0, :] > score_threshold)[0] # select those scores scores = scores[0][indices] # find the order with which to sort the scores scores_sort = np.argsort(-scores)[:max_detections] # select detections image_boxes = boxes[0, indices[scores_sort], :] image_scores = scores[scores_sort] image_labels = labels[0, indices[scores_sort]] image_detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1) if return_plot: draw_detections(raw_image, image_boxes, image_scores, image_labels, label_to_name=label_to_name, score_threshold=score_threshold) return raw_image else: return image_boxes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict_trees(deepforest_model, rgb_path, bounds, expand=10):\n #DeepForest is trained on 400m crops, easiest to mantain this approximate size centered on points\n left, bottom, right, top = bounds\n expand_width = (40 - (right - left))/2\n left = left - expand_width\n right = right + expand_wid...
[ "0.6780052", "0.6780052", "0.66179", "0.66036594", "0.6570653", "0.6464624", "0.6442197", "0.6430756", "0.63636744", "0.63617635", "0.629576", "0.6279707", "0.62130153", "0.6206307", "0.61893594", "0.61628205", "0.61508197", "0.6141496", "0.61407137", "0.61125773", "0.6096678...
0.0
-1
loop through a dir and run all images
def predict_all_images(): #Read config config = read_config() #read model model = read_model(config["model_path"], config) tifs = glob.glob(os.path.join("data","**","*.tif")) for tif in tifs: print(tif) prediction = predict_image(model, tif, score_threshold = 0.1, max_detections= 200,return_plot=False) #reshape and save to csv df = pd.DataFrame(prediction) df.columns = ["xmin","ymin","xmax","ymax"] #save boxes file_path = os.path.splitext(tif)[0] + ".csv" df.to_csv(file_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_images():\n create_dirs()\n for root, dirs, files in os.walk(IN):\n for name in files:\n if name[0] == '.':\n continue\n process_image(name)", "def processImages(self):\n for file in os.listdir(self.config[\"tempPath\"]):\n self.logger.debug(\"Calling generateI...
[ "0.78397024", "0.7330907", "0.7104293", "0.7006195", "0.691466", "0.68556714", "0.6756341", "0.66847354", "0.6673864", "0.6630209", "0.6581177", "0.65660363", "0.6560143", "0.6554627", "0.65188265", "0.6516782", "0.65092206", "0.65087897", "0.6484184", "0.6457262", "0.6429507...
0.0
-1
Fixture to clean up logging output before each test.
def before_and_after_each_test(self, caplog): #before each test # Set to capture logs above INFO caplog.set_level(logging.INFO) caplog.clear() yield #after each test
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n clean_temlogger_config()", "def setUp(self):\n clean_temlogger_config()", "def setUp(self):\n clean_temlogger_config()", "def setUp(self):\n clean_temlogger_config()", "def setUp(self):\n self.path = tempfile.mkdtemp()\n self.log = log.Log(self.p...
[ "0.7646536", "0.7646536", "0.7646536", "0.7646536", "0.7533063", "0.7408682", "0.73239565", "0.73239565", "0.73239565", "0.73239565", "0.73239565", "0.7239843", "0.7204147", "0.7201381", "0.7155247", "0.69071877", "0.6867982", "0.68438387", "0.6788774", "0.675041", "0.6707021...
0.6735152
20
Description When is given a directory name that exist Expected Result returns True
def test_has_directory(self, check_fn_true): #setup has_directory = extractor.make_has_directory(os.path.isdir) #when test1 = has_directory("./data/observed") #result assert test1 is True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_dir_exist():\n if os.path.isdir(path_structure):\n return True\n else:\n return False", "def test_doesnt_have_directory(self, check_fn_false):\n\n # setup\n has_directory = extractor.make_has_directory(os.path.isdir)\n\n # when\n test2 = has_directory(\"....
[ "0.7946343", "0.774155", "0.7725892", "0.7708306", "0.75825566", "0.749166", "0.7445757", "0.74027336", "0.73993707", "0.73776567", "0.73469585", "0.7326901", "0.7297167", "0.7291639", "0.7277154", "0.7249648", "0.72335654", "0.72275263", "0.72248363", "0.71985984", "0.718994...
0.7547904
5
Description When is given a directory name that exist Expected Result Shows log that directory was found
def test_has_directory_log(self, check_fn_true, caplog): #setup records = caplog.records has_directory = extractor.make_has_directory(os.path.isdir) directory_path = "./data/observed" #when test1 = has_directory(directory_path) #result assert len(records) == 1 assert records[0].message == f"It was found directory {directory_path}"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_doesnt_have_directory_log(self, check_fn_false, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/tests\"\n \n #when\n test2 = has_directory(directory_path)\n\n #r...
[ "0.74612963", "0.66516936", "0.6648649", "0.6637668", "0.6592905", "0.65451527", "0.6507091", "0.6476768", "0.6445445", "0.64397144", "0.641731", "0.6323137", "0.6317305", "0.6291415", "0.6287829", "0.6264551", "0.62638617", "0.62398607", "0.6232754", "0.6224753", "0.62079227...
0.7359447
1
Description When is given a directory name that doesnt exist Expected Result returns False
def test_doesnt_have_directory(self, check_fn_false): # setup has_directory = extractor.make_has_directory(os.path.isdir) # when test2 = has_directory("./data/tests") # result assert test2 is False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_dir_exist():\n if os.path.isdir(path_structure):\n return True\n else:\n return False", "def __is_directory_name(filename):\n return filename[-1] == '/'", "def is_valid_directory(args):\n if args.directory is not None:\n return True\n return False", "def is_d...
[ "0.7637506", "0.7587286", "0.75317335", "0.7509946", "0.7493378", "0.74204826", "0.74024594", "0.7333082", "0.7327076", "0.7321167", "0.73203564", "0.72724897", "0.7250266", "0.7245775", "0.7242602", "0.7218288", "0.7178371", "0.71603537", "0.71599776", "0.714316", "0.7106386...
0.7918296
0
Description When is given a directory name that doesnt exist Expected Result Shows log that directory wasn't found
def test_doesnt_have_directory_log(self, check_fn_false, caplog): #setup records = caplog.records has_directory = extractor.make_has_directory(os.path.isdir) directory_path = "./data/tests" #when test2 = has_directory(directory_path) #result assert len(records) == 1 assert records[0].message == f"It wasn't found directory {directory_path}"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testNoSuchDirectory(self):\n\n self.assertRaises(OSError,\n parse_package,\n \"no_such_directory\")", "def test_ensure_dir_exists(self):\n pass", "def check_dir(directory: str, err_string: str) -> None:\n if not pathlib.Path(directory).is_d...
[ "0.7097761", "0.69815487", "0.6958049", "0.69345635", "0.68600756", "0.6768691", "0.67403984", "0.67001784", "0.66480124", "0.65875924", "0.6568089", "0.6561216", "0.6556799", "0.6535799", "0.6534498", "0.6532836", "0.6514325", "0.65090376", "0.64824975", "0.6481356", "0.6477...
0.7614185
0
Description When is given a directory path that exist and has csv files Expected Result returns array with paths of files
def test_get_filepaths(self): #setup get_filepaths = extractor.make_get_filepaths(self.mock_get_files_fn) #when test1 = get_filepaths("./dir1", ".csv") #result assert len(test1) == 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_csv_in_path(self, path):\n files = os.listdir((path))\n return files", "def get_file_list(rootdir): #{{{\n file_list = []\n for f in os.listdir(rootdir):\n if f == None or not f.endswith(\".csv\"):\n continue\n file_list.append(os.path.join(rootdir, f))\n ...
[ "0.73693734", "0.70882964", "0.6973099", "0.69681597", "0.6596682", "0.6555726", "0.6549086", "0.6546537", "0.64897746", "0.64868534", "0.6455237", "0.64520305", "0.63143605", "0.6301336", "0.62846804", "0.62800604", "0.6277007", "0.62760013", "0.62456584", "0.6236947", "0.62...
0.6765397
4
Description When is given a directory path that exist and doesn't have csv files Expected Result returns array with paths of files
def test_get_filepaths_empty(self): #setup get_filepaths = extractor.make_get_filepaths(self.mock_get_files_fn) #when test2 = get_filepaths("./dir2", ".c") #result assert len(test2) == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_file_list(rootdir): #{{{\n file_list = []\n for f in os.listdir(rootdir):\n if f == None or not f.endswith(\".csv\"):\n continue\n file_list.append(os.path.join(rootdir, f))\n \n return file_list", "def get_csv_in_path(self, path):\n files = os.listdir((pa...
[ "0.70320946", "0.67621905", "0.6713793", "0.6638676", "0.66279495", "0.65001047", "0.6399279", "0.63733476", "0.63562477", "0.6341865", "0.63009876", "0.6289475", "0.6229904", "0.62287295", "0.6151689", "0.61245257", "0.61133426", "0.61005086", "0.60860026", "0.60531443", "0....
0.6132733
15
Description When is given a directory path that has observed as parent folder and csv file with desired name Expected Result returns dictionary with right data
def test_observed_folder_path(self): #setup filepath = ".data/observed/Abadia-BA_-11.56_-37.52.csv" expected_result = { "type": "observed", "city": "Abadia", "state": "BA", "coordinates": ['-11.56', '-37.52'], "observed": {} } #result assert extractor.get_metadata_from_filepath(filepath) == expected_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_descendant_file_path(parent_path):\n csv_relative_path = []\n for root, dirs, files in os.walk(parent_path):\n for file in files:\n words = file.split(r'.')\n if words[-1] == 'csv':\n file_path = os.path.join(parent_path, file)\n ...
[ "0.64571184", "0.64260465", "0.62206316", "0.5949646", "0.5880635", "0.5827373", "0.5811057", "0.57590806", "0.57238656", "0.5693332", "0.56806904", "0.56777036", "0.5620818", "0.55884403", "0.55691016", "0.55604094", "0.5549872", "0.55385315", "0.55111617", "0.5508465", "0.5...
0.5314282
43
Description When is given a directory path that has forecast as parent folder and csv file with desired name Expected Result returns dictionary with right data
def test_forecast_folder_path(self): #setup filepath = ".data/forecast/Kano-KN_-9.09_7.39.json" expected_result = { "type": "forecast", "city": "Kano", "state": "KN", "coordinates": ['-9.09', '7.39'], "forecast": {} } #result assert extractor.get_metadata_from_filepath(filepath) == expected_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_isys_output(path_to_csv,directory_details):\n isys_results=open(path_to_csv).readlines()\n partial_paths_list=[]\n #below we are starting with the second row because the first row has the column\n # headings \n start=1\n for item in isys_results[start:]:\n partial_path=item.split...
[ "0.60329616", "0.5951354", "0.5933117", "0.59185493", "0.5806007", "0.57351774", "0.5663706", "0.5627654", "0.562072", "0.55951595", "0.55628073", "0.55555654", "0.5554841", "0.5539798", "0.5537601", "0.5528273", "0.5500308", "0.5499843", "0.54943573", "0.54920584", "0.548131...
0.6474041
0
Description When is given a directory path that has no forecast or observed as parent folder and csv file with not desired name Expected Result returns dictionary with values empty
def test_invalid_file_path(self): # Test with an invalid file path #setup filepath = ".data/kano/test.txt" expected_result = { "type": "", "city": "", "state": "", "coordinates": ["", ""], '': {} } #result assert extractor.get_metadata_from_filepath(filepath) == expected_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_observed_folder_path(self):\n\n #setup\n filepath = \".data/observed/Abadia-BA_-11.56_-37.52.csv\"\n expected_result = {\n \"type\": \"observed\",\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": ['-11.56', '-37.52'],\n ...
[ "0.65036917", "0.6375648", "0.6118687", "0.59009016", "0.5858981", "0.5716339", "0.5682048", "0.551454", "0.5508633", "0.55065745", "0.5505938", "0.54979366", "0.54892653", "0.5485146", "0.5470703", "0.54683197", "0.5457055", "0.54512775", "0.5430467", "0.5412322", "0.5408284...
0.5297656
33
Description When is given a csv_filepath and output_filepath and its the first time reading it Expected Result creates a json file with right values
def test_first_time_reading_csv_file(self): # Create a temporary directory for test files temp_dir = "test_files/observed" os.makedirs(temp_dir, exist_ok=True) # Create a test CSV file csv_filepath = os.path.join(temp_dir, "Abadia-BA_-11.56_-37.52.csv") with open(csv_filepath, "w", newline="") as csv_file: writer = csv.writer(csv_file, delimiter=";") writer.writerow(["periods", "precipitation", "temperature", "max_temperature"]) writer.writerow(["2023-01-01", "5", "25", "30"]) writer.writerow(["2023-01-02", "10", "23", "28"]) # Define the expected output JSON file path expected_output_filepath = os.path.join(temp_dir, "BA_Abadia.json") # Call the function under test extractor.csv_to_json(csv_filepath, temp_dir) # Verify that the output JSON file exists assert os.path.exists(expected_output_filepath) # Load the output JSON file with open(expected_output_filepath, "r") as json_file: json_data = json.load(json_file) # Verify the contents of the JSON file expected_data = { "city": "Abadia", "state": "BA", "coordinates": ["-11.56", "-37.52"], "observed": { "periods": ["2023-01-01", "2023-01-02"], "precipitation": ["5", "10"], "temperature": ["25", "23"], "max_temperature": ["30", "28"] } } assert json_data == expected_data # Clean up the temporary directory and files os.remove(csv_filepath) os.remove(expected_output_filepath) os.rmdir(temp_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_json_from_csv(csv_file, delimiter, cols_delimiter, keep, dic_types, infer_types, max_docs, json_file, per_line):\n\n # Get header of csv\n header_csv = get_header_csv(csv_file, cols_delimiter)\n\n # Create structure of json\n print(' [INFO] Creating json\\'s structure')\n jstruct = creat...
[ "0.711705", "0.6895903", "0.6818029", "0.6596224", "0.6538705", "0.6498996", "0.647413", "0.64032245", "0.632765", "0.63111657", "0.6258295", "0.62576884", "0.62437224", "0.6213409", "0.6196973", "0.6165735", "0.6092725", "0.6078405", "0.607367", "0.60391676", "0.6012014", ...
0.7492845
0
Description When is given a csv_filepath and output_filepath and already exists the file Expected Result concatenate the old json file with the values found in 2nd reading.
def test_when_file_already_exist(self): # Create a temporary directory for test files temp_dir = ["test_files/observed", "test_files/forecast", "test_files/output"] for dir in temp_dir: os.makedirs(dir, exist_ok=True) # Create the 1st csv file first_csv_filepath = os.path.join(temp_dir[0], "Abadia-BA_-11.56_-37.52.csv") with open(first_csv_filepath, "w", newline="") as csv_file: writer = csv.writer(csv_file, delimiter=";") writer.writerow(["periods", "precipitation", "temperature", "max_temperature"]) writer.writerow(["2023-01-01", "5", "25", "30"]) writer.writerow(["2023-01-02", "10", "23", "28"]) # Creating the 2nd csv file in different directory second_csv_filepath = os.path.join(temp_dir[1], "Abadia-BA_-11.56_-37.52.csv") with open(second_csv_filepath, "w", newline="") as csv_file: writer = csv.writer(csv_file, delimiter=";") writer.writerow(["periods", "precipitation", "temperature", "max_temperature"]) writer.writerow(["2023-01-01", "5", "25", "30"]) writer.writerow(["2023-01-02", "10", "23", "28"]) # Define the expected output JSON file path expected_output_filepath = os.path.join(temp_dir[2], "BA_Abadia.json") # Call the function under test extractor.csv_to_json(first_csv_filepath, temp_dir[2]) extractor.csv_to_json(second_csv_filepath, temp_dir[2]) # Verify that the output JSON file exists assert os.path.exists(expected_output_filepath) # Load the output JSON file with open(expected_output_filepath, "r") as json_file: json_data = json.load(json_file) # Verify the contents of the JSON file expected_data = { "city": "Abadia", "state": "BA", "coordinates": ["-11.56", "-37.52"], "observed": { "periods": ["2023-01-01", "2023-01-02"], "precipitation": ["5", "10"], "temperature": ["25", "23"], "max_temperature": ["30", "28"] }, "forecast": { "periods": ["2023-01-01", "2023-01-02"], "precipitation": ["5", "10"], "temperature": ["25", "23"], "max_temperature": ["30", "28"] }, } # Assertion assert json_data == expected_data # Clean up the temporary directory and 
files os.remove(first_csv_filepath) os.remove(second_csv_filepath) os.remove(expected_output_filepath) for dir in temp_dir: os.rmdir(dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def collate_similar_data(input_csv_file_path, output_csv_file_path):\n if not input_csv_file_path or not output_csv_file_path:\n return\n with open(output_csv_file_path, 'w') as file_object:\n csv_writer = csv.writer(file_object, delimiter=',')\n csv_writer.writerow(\n (...
[ "0.66875005", "0.65311", "0.64833546", "0.6277665", "0.6257637", "0.5997841", "0.5968701", "0.58972067", "0.5896129", "0.5890339", "0.58322793", "0.5827144", "0.580506", "0.57693833", "0.5767864", "0.5731467", "0.57120144", "0.56666887", "0.56376743", "0.5627514", "0.5622011"...
0.69841707
0
Description When is given a csv_filepath and output_filepath and one of the columns has blank character Expected Result creates a json file ignoring blank column
def test_blank_column(self): # Create a temporary directory for test files temp_dir = "test_files/observed" os.makedirs(temp_dir, exist_ok=True) # Create a test CSV file csv_filepath = os.path.join(temp_dir, "Abadia-BA_-11.56_-37.52.csv") with open(csv_filepath, "w", newline="") as csv_file: writer = csv.writer(csv_file, delimiter=";") writer.writerow(["periods", "precipitation", "temperature", ""]) writer.writerow(["2023-01-01", "5", "25", ""]) writer.writerow(["2023-01-02", "10", "23", ""]) # Define the expected output JSON file path expected_output_filepath = os.path.join(temp_dir, "BA_Abadia.json") # Call the function under test extractor.csv_to_json(csv_filepath, temp_dir) # Verify that the output JSON file exists assert os.path.exists(expected_output_filepath) # Load the output JSON file with open(expected_output_filepath, "r") as json_file: json_data = json.load(json_file) # Verify the contents of the JSON file expected_data = { "city": "Abadia", "state": "BA", "coordinates": ["-11.56", "-37.52"], "observed": { "periods": ["2023-01-01", "2023-01-02"], "precipitation": ["5", "10"], "temperature": ["25", "23"] } } assert json_data == expected_data # Clean up the temporary directory and files os.remove(csv_filepath) os.remove(expected_output_filepath) os.rmdir(temp_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_to_csv_with_no_rows_returns_none(self):\n output = row_handling.to_csv(rows=[], csv_path=self.csv_path)\n assert output is None", "def write_csv_file(csv_output_file, full_data):\n j = 0\n csv_file_path = make_dir(csv_output_file)\n\n # csv_file_path = os.path.join(csv_file_path, ...
[ "0.6778147", "0.66215456", "0.6587748", "0.6419415", "0.6071375", "0.60661834", "0.6063572", "0.6040226", "0.60079324", "0.59419805", "0.5918246", "0.5916295", "0.5871361", "0.58706796", "0.58664465", "0.5850558", "0.5849682", "0.5848059", "0.5840836", "0.58217335", "0.577176...
0.6656623
1
builds the test suite.
def test_suite(): suite = unittest.TestSuite() suite.addTests(unittest.makeSuite(PrimesTests)) suite.addTests(unittest.makeSuite(OtherTests)) return suite
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUpSuite():\n global _output_dir\n global _suite_configured\n\n if _suite_configured:\n return\n\n def remove_output_dir():\n global _output_dir\n if _output_dir != '':\n try:\n shutil.rmtree(_output_dir)\n except FileNotFoundError:\n ...
[ "0.72579163", "0.70755035", "0.69487077", "0.693024", "0.68874764", "0.6838446", "0.67889327", "0.6781857", "0.6769531", "0.67448074", "0.6737396", "0.67239064", "0.6713449", "0.67040795", "0.6690259", "0.6676821", "0.6676821", "0.66717535", "0.66633546", "0.66531706", "0.665...
0.6828355
6
Plots the path from start node to goal region as well as the graph (or tree) searched with the Sampling Based Algorithms.
def draw_results(algo_name, path, V, E, env, bounds, object_radius, resolution, start_pose, goal_region, elapsed_time): graph_size = len(V) path_size = len(path) # Calculate path length path_length = 0.0 for i in xrange(len(path)-1): path_length += euclidian_dist(path[i], path[i+1]) # Create title with descriptive information based on environment, path length, and elapsed_time title = algo_name + "\n" + str(graph_size) + " Nodes. " + str(len(env.obstacles)) + " Obstacles. Path Size: " + str(path_size) + "\n Path Length: " + str(path_length) + "\n Runtime(s)= " + str(elapsed_time) # Plot environment env_plot = plot_environment(env, bounds) # Add title env_plot.set_title(title) # Plot goal plot_poly(env_plot, goal_region, 'green') # Plot start buffered_start_vertex = Point(start_pose).buffer(object_radius, resolution) plot_poly(env_plot, buffered_start_vertex, 'red') # Plot Edges explored by ploting lines between each edge for edge in E: line = LineString([edge[0], edge[1]]) plot_line(env_plot, line) # Plot path plot_path(env_plot, path, object_radius)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_global_path(start, goal, path, occupancy_grid):\n # Displaying the map\n fig_astar, ax_astar = display_map(occupancy_grid)\n\n # Plot the best path found and the list of visited nodes\n ax_astar.plot(path[0], path[1], marker=\"o\", color='blue');\n ax_astar.scatter(start[0], start[1], ma...
[ "0.66848135", "0.6656645", "0.65066147", "0.6478617", "0.64643204", "0.6413952", "0.63114595", "0.630562", "0.6239466", "0.61826295", "0.61755365", "0.61461246", "0.611502", "0.6114483", "0.6070725", "0.60319483", "0.6004294", "0.5978453", "0.5932531", "0.58990204", "0.585567...
0.5336481
80
Delete a log file.
def delete_log(file_path): if os.path.exists(file_path): print('Deleting log %s...' % file_path) os.remove(file_path) else: raise ValueError("File %r doesn't exists - cannot delete." % file_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_log():\n log_path = Path.cwd() / \"premise.log\"\n if log_path.exists():\n log_path.unlink()", "def delete_file(fileName):\n os.remove(fileName)\n print (\"Deleteing file: \" + str(fileName))\n write_log()\n read_log()", "def delete_log(filename):\n log_di...
[ "0.77605826", "0.7727856", "0.73945826", "0.7313134", "0.72794944", "0.7045915", "0.7009386", "0.6897459", "0.6797494", "0.6758738", "0.6743915", "0.6732991", "0.671415", "0.66890925", "0.6686626", "0.6566584", "0.6561632", "0.6561151", "0.65555596", "0.6545941", "0.6545941",...
0.8215973
0
Create a new Logger.
def __init__(self, file_path, print_too=True, override=False): self.file_path = file_path self.print_too = print_too if override: if os.path.exists(file_path): print('Overriding - deleting previous log...') os.remove(file_path) os.makedirs(os.path.dirname(file_path), exist_ok=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_logger() -> logging.Logger:\n pass # TODO: Replace with implementation!", "def create_logger():\r\n global logger\r\n logger = logging.getLogger(logger_name)\r\n\r\n formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s')\r\n \r\n handler = logging.StreamHandler()...
[ "0.82856834", "0.7896472", "0.77731115", "0.7539057", "0.7495882", "0.7493682", "0.7396028", "0.73747236", "0.7198107", "0.71252424", "0.71039444", "0.7093442", "0.7016825", "0.69982064", "0.6978802", "0.69182163", "0.6911099", "0.6907273", "0.6900183", "0.6848638", "0.683535...
0.0
-1
retorna o valor de graus Farenheit convertido para Celsius
def toCelsius(farenheit): return (farenheit - 32)*5 / 9
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_f_to_c(temp_in_farenheit): ## ##\n celsiustemp = round((temp_in_farenheit - 32) * 5/9, 1) ##\n return celsiustemp ##", "def convert_f_to_c(temp_in_f...
[ "0.8010471", "0.7904015", "0.7749893", "0.7748052", "0.77476305", "0.77396774", "0.76843536", "0.7657182", "0.76182777", "0.7557405", "0.74652946", "0.72664905", "0.7263454", "0.7263454", "0.7261374", "0.72476894", "0.7239142", "0.72153395", "0.7209384", "0.7199643", "0.71753...
0.8065382
0
Return github API URL as string
def get_api_url(self): url = 'https://api.{}/repos/{}/{}/git/'.format(HOST_GITHUB, \ self.repo, self.product) return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def github_url(self):\n return self.github.replace('.git', '')", "def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return f\...
[ "0.79492223", "0.72487265", "0.7181632", "0.7154561", "0.7106888", "0.6985907", "0.6885024", "0.68606704", "0.682998", "0.6809253", "0.6746705", "0.67292404", "0.6727469", "0.67142564", "0.6706738", "0.66876775", "0.66692704", "0.66497993", "0.6614878", "0.6613082", "0.658479...
0.83484757
0
Get all tags as json from Github API.
def get_tags(self): return self.get_url_data(self.api_url + 'refs/tags')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_tags():\n try:\n data = ReadTag().run()\n except Exception as ex:\n return jsonify({'code': '500','message':'Internal server error'})\n else:\n return jsonify({'code': '200','data': data})", "def tags(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'tags')\r\n\r...
[ "0.7320748", "0.71714383", "0.7113308", "0.6959439", "0.69185", "0.6898857", "0.6868199", "0.6557756", "0.63590163", "0.63203824", "0.630982", "0.63082796", "0.62603027", "0.61560315", "0.61486876", "0.6137396", "0.60915667", "0.60803205", "0.60155445", "0.5989724", "0.595926...
0.6558411
7
Get a specific tag's data from Github API.
def get_tag(self, sha): return self.get_url_data(self.api_url + 'tags/' + sha)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tag(self, tag):\n resp = self.get(_u.build_uri(\"tag\", domain=self.domain),\n data={'tag': tag})\n return utils.handle_response(resp)", "def find_by_id(self, tag, params={}, **options):\n path = \"/tags/%s\" % (tag)\n return self.client.get(path, params...
[ "0.67847025", "0.6460267", "0.63765323", "0.6361611", "0.6254757", "0.6193532", "0.6165435", "0.5924588", "0.5913108", "0.5913108", "0.5889115", "0.58397275", "0.5831963", "0.58196306", "0.581669", "0.57963234", "0.57881856", "0.57807654", "0.57554406", "0.57134587", "0.56990...
0.65368634
1
Github API can only return all tags, but we only want the latest.
def get_latest_tags(self): start = len(self.tags) - self.num_comparisons tags = self.tags latest = [] for i in xrange(len(tags)): if i >= start: parts = tags[i]['ref'].split('/') release_num = parts[2] sha = tags[i]['object']['sha'] tag = [release_num, sha] latest.append(tag) return latest
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]", "def do_...
[ "0.7582535", "0.67998123", "0.65141267", "0.64969236", "0.6446542", "0.640628", "0.6393738", "0.6162547", "0.5983191", "0.5975903", "0.59552914", "0.5918902", "0.5868156", "0.58608156", "0.5853621", "0.58516884", "0.5850484", "0.5837113", "0.5833859", "0.5821743", "0.5812356"...
0.6986198
1
Return github tag release URL as string
def get_url_tag_release(self, release_num): url = 'https://{}/{}/{}/releases/tag/{}'.format( HOST_GITHUB, self.repo, self.product, release_num ) return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def git_url(fp: str) -> str:\n return f\"https://github.com/pantsbuild/pants/blob/release_{PANTS_SEMVER}/{fp}\"", "def get_url_tag_commit(self, git_sha):\n\n url = 'https://{}/{}/{}/commit/{}'.format(\n HOST_GITHUB,\n self.repo,\n self.product,\n git_sha\n ...
[ "0.7346837", "0.7119125", "0.6906928", "0.6849196", "0.6814661", "0.6633057", "0.6601647", "0.65425444", "0.65085566", "0.6498361", "0.6480949", "0.6473229", "0.63616604", "0.63453585", "0.63183016", "0.62817633", "0.62013495", "0.6147767", "0.61303365", "0.61104536", "0.6100...
0.7984513
0
Return github tag commit SHA URL as string
def get_url_tag_commit(self, git_sha): url = 'https://{}/{}/{}/commit/{}'.format( HOST_GITHUB, self.repo, self.product, git_sha ) return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def git_url(fp: str) -> str:\n return f\"https://github.com/pantsbuild/pants/blob/release_{PANTS_SEMVER}/{fp}\"", "def github_url(self):\n return self.github.replace('.git', '')", "def repo_link(repo):\n return \"https://github.com/\" + repo", "def url_for(self: Self, commit_sha: str, path: str,...
[ "0.71973187", "0.69282585", "0.6792901", "0.6748358", "0.66634905", "0.6656634", "0.6605502", "0.6590671", "0.65770376", "0.6560338", "0.6532101", "0.65264523", "0.6505128", "0.6500491", "0.6499007", "0.64897436", "0.63229996", "0.6280397", "0.6262036", "0.6240915", "0.619329...
0.8073723
0
Return github compare URL as string
def get_comparison(self, start, end): return 'https://{}/{}/{}/compare/{}...{}'.format(HOST_GITHUB, \ self.repo, self.product, start, end) + '\n'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def github_url(self):\n return self.github.replace('.git', '')", "def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return f\...
[ "0.7844604", "0.739291", "0.73613286", "0.73068553", "0.69562215", "0.6901379", "0.6821557", "0.6768966", "0.671453", "0.6567272", "0.65653217", "0.6540621", "0.6515569", "0.6500115", "0.6459063", "0.64307237", "0.63972664", "0.63898844", "0.63744414", "0.6369827", "0.6300801...
0.701876
4
Parse CHANGELOG for latest tag.
def get_changelog(self, commit_sha): url = 'https://{}/{}/{}/' + commit_sha + '/CHANGELOG' url = url.format(HOST_GITHUB_RAW, self.repo, self.product) req = requests.get(url) lines = req.text first = self.latest_tags[self.num_comparisons - 1][VERS] last = self.latest_tags[self.num_comparisons - 2][VERS] flag = False log = '' for line in lines.splitlines(): if first in line: flag = True if last in line: flag = False if flag: log += line + '\n' return log
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_latest_update(self, resp: Dict[str, Any], latest_version: str) -> str:\n latest_release = resp.get(\"releases\", {}).get(latest_version)\n if latest_release is not None and isinstance(latest_release, list):\n release_artifact_dates = []\n for artifact in latest_releas...
[ "0.616343", "0.6080337", "0.6000073", "0.59245783", "0.58972937", "0.5849398", "0.58468693", "0.57212126", "0.569988", "0.56685996", "0.5651651", "0.5609761", "0.5598691", "0.5501291", "0.549835", "0.54694337", "0.5457894", "0.5446662", "0.5412487", "0.53381616", "0.53150606"...
0.6108666
1
Constructs release notes for Bugzilla service deployment ticket.
def get_release_notes(self): notes = self.output.get_header('RELEASE NOTES') notes += 'https://{}/{}/{}/releases'.format(HOST_GITHUB, \ self.repo, self.product) + '\n' notes += self.output.get_sub_header('COMPARISONS') notes += self.get_comparison(self.latest_tags[0][VERS], self.latest_tags[1][VERS]) if len(self.latest_tags) >= (MAX_COMPARISONS_TO_SHOW - 1): notes += self.get_comparison(self.latest_tags[1][VERS], self.latest_tags[2][VERS]) if len(self.latest_tags) >= MAX_COMPARISONS_TO_SHOW: notes += self.get_comparison(self.latest_tags[2][VERS], self.latest_tags[3][VERS]) tag_data = self.get_tag(self.latest_tags[3][SHA]) notes += self.output.get_sub_header('TAGS') notes += self.get_url_tag_release(self.latest_tags[3][VERS]) + '\n' notes += self.get_url_tag_commit(tag_data["object"]["sha"]) + '\n' changelog = self.get_changelog(tag_data["object"]["sha"]) if changelog: notes += self.output.get_sub_header('CHANGELOG') notes += changelog return notes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release_notes(version, author, git_ref_target, git_ref_source, build_type):\n print('generating release notes')\n if git_ref_source:\n if git_ref_source != 'HEAD':\n git_ref_source = 'origin/{}'.format(git_ref_source)\n changelog = run('git log origin/{}..{}'.format(git_ref_targe...
[ "0.7133973", "0.62649", "0.6180011", "0.61652756", "0.57979625", "0.5698693", "0.56655735", "0.56425494", "0.55498487", "0.55108356", "0.5495176", "0.5493605", "0.5473608", "0.54344285", "0.5391342", "0.5365955", "0.5322172", "0.5317257", "0.5311151", "0.52918744", "0.5264947...
0.6374402
1
Gets the confidence of this PcrTestRecordResult.
def confidence(self): return self._confidence
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confidence(self) -> float:\n return self._confidence", "def confidence(self) -> float:\n return float(self.class_scores[self.class_num])", "def detection_confidence(self):\n return self._detection_confidence", "def get_medie_confidence(self):\n return self.__medie_confidence",...
[ "0.74183095", "0.695079", "0.6752761", "0.62172127", "0.6101365", "0.59268916", "0.59007055", "0.58964837", "0.58102906", "0.5795349", "0.5786901", "0.57829434", "0.5746195", "0.56174123", "0.56115687", "0.5601758", "0.55095553", "0.55019873", "0.545608", "0.5437329", "0.5400...
0.7580267
1
Sets the confidence of this PcrTestRecordResult.
def confidence(self, confidence): self._confidence = confidence
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confidence(self, confidence: float):\n\n self._confidence = confidence", "def confidence(self, confidence):\n\n self._confidence = confidence", "def confidence(self, confidence):\n\n self._confidence = confidence", "def confidence(self) -> float:\n return self._confidence", ...
[ "0.73178154", "0.72186536", "0.72186536", "0.64052767", "0.624179", "0.624179", "0.6131234", "0.6131234", "0.60332805", "0.60332805", "0.5945399", "0.59088707", "0.5768737", "0.5661176", "0.51978236", "0.5128352", "0.5126153", "0.5094373", "0.5067891", "0.50525135", "0.501097...
0.72880876
2
Returns the model properties as a dict
def to_dict(self): result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: if attr in self.sensitive_list: result[attr] = "****" else: result[attr] = value return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n return self.properties", "def to_dict(self):\n return self.properties", "def get_properties(self):\n return self.properties", "def asdict(self):\n return self._prop_dict", "def json(self):\n rv = {\n prop: getattr(self, prop)\n f...
[ "0.7751993", "0.7751993", "0.73391134", "0.7334895", "0.7297356", "0.727818", "0.7159078", "0.71578115", "0.71494967", "0.71494967", "0.71283495", "0.71275014", "0.7122587", "0.71079814", "0.7060394", "0.7043251", "0.7034103", "0.70233124", "0.69635814", "0.69586295", "0.6900...
0.0
-1
Returns the string representation of the model
def to_str(self): import simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding("utf-8") return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n ...
[ "0.85856134", "0.7814518", "0.77898884", "0.7751367", "0.7751367", "0.7712228", "0.76981676", "0.76700574", "0.7651133", "0.7597206", "0.75800353", "0.7568254", "0.7538184", "0.75228703", "0.7515832", "0.7498764", "0.74850684", "0.74850684", "0.7467648", "0.74488163", "0.7442...
0.0
-1
Returns true if both objects are equal
def __eq__(self, other): if not isinstance(other, PcrTestRecordResult): return False return self.__dict__ == other.__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other):\n if i...
[ "0.8088132", "0.8088132", "0.8054589", "0.7982687", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", ...
0.0
-1
Returns true if both objects are not equal
def __ne__(self, other): return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n ...
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
Set the angle of the servo motor to input angle in degrees
def set_angle(self, angle): new_angle = angle # Declaring conversion constants angle_min = 0 angle_max = 180 angle_range = angle_max - angle_min dc_range = self._dc_max - self._dc_min # Enforcing angle range if new_angle > angle_max: new_angle = angle_max elif new_angle < angle_min: new_angle = angle_min # Scaling input angle to an appropriate duty cycle duty_cycle = ((dc_range / angle_range) * (new_angle - angle_min)) + self._dc_min self._servo_pwm.changeDutyCycle(duty_cycle)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_angle(self, value):\n if not -90 <= value <= 90:\n raise ValueError('Servo angle must be between -90 and 90 degrees')\n self.duty_cycle = ...", "def set_angle(self, ang):\n if ang < 0:\n ang = 0\n elif ang > 180:\n ang = 180\n dutyCycle ...
[ "0.8180504", "0.78847355", "0.7495005", "0.733505", "0.7213522", "0.71889293", "0.71747357", "0.7051908", "0.7026474", "0.70241266", "0.69745034", "0.6967439", "0.6912899", "0.6871412", "0.6827222", "0.681534", "0.67669934", "0.674949", "0.6662368", "0.66219145", "0.6615597",...
0.7820531
2
Will be called before every test
def setUp(self): # Create table db.create_all() #Create test registree mcdonalds = Store(name='mcdonalds', shop_address='63 Northbrook st', shop_postcode='rg14 1ae', takeaway=True) tesco = Store(name='tesco', shop_address='London rd, Newbury', shop_postcode='rg14 2bp', takeaway=False) coop = Store(name='coop', shop_address='Andover rd', shop_postcode='rg19 3bp', takeaway=False) #adding test receipts to db receipt1 = Receipts(most_expensive=5.09, cost_of_alcohol=0, receipt_total=11.36, takeaway=True, delivery_fee=1.99, delivery_time_mins=28, store_id=1, shop=mcdonalds) receipt2 = Receipts(most_expensive=2.80, cost_of_alcohol=16, receipt_total=11.90, store_id=2, shop=tesco) receipt3 = Receipts(most_expensive=3.00, cost_of_alcohol=0, receipt_total=18.76, store_id=2, shop=tesco) receipt4 = Receipts(most_expensive=2.00, cost_of_alcohol=0, receipt_total=20.91, store_id=2, shop=tesco) #Add and save to database store_list = [mcdonalds, tesco, coop] receipt_list = [receipt1, receipt2, receipt3, receipt4] for i in store_list: db.session.add(i) for i in receipt_list: db.session.add(i) db.session.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def before_run_tests(cls):\n pass", "def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass", "def setUp(self):\n self", "def setUp(self):\n self", "def setUp(self):\n\n pass", "def setUp(self):\n\n pass", "def setUp(self):\n ...
[ "0.8457524", "0.83148706", "0.82529396", "0.82529396", "0.81978506", "0.81978506", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835",...
0.0
-1
Will be called after every test
def teardown(self): db.session.remove() db.drop_all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def after_test(self, test_results):\n pass", "def after_all(self) -> None:", "def tearDown(self):\n pass", "def teardown(self):", "def teardown(self):", "def teardown(self):", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n ...
[ "0.84368247", "0.8336089", "0.82743084", "0.8255529", "0.8255529", "0.8255529", "0.8220987", "0.8220987", "0.8220987", "0.8220987", "0.8220987", "0.8202792", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.819...
0.0
-1
Prompt user for input and continue to do so until input is valid. This function takes two required inputs, the message to display, and the limit of characters required. If the user enters something too long, they are prompted again until the input is correct. If the optional isNumber parameter is True, then it will also continue to prompt the user until a valid number is input.
def LimitedInput(message, limit, isNumber=False): keepAsking = True while keepAsking: answer = input(message) if len(answer) > limit: print("The input must be", limit, "characters or less.") else: keepAsking = False if isNumber is True and CheckNumber(answer) is False: print("The input must be a number.") keepAsking = True return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ask_number(message: str) -> int:\n global number\n assert isinstance(message, str), \"message should be a string\"\n stop_condition2 = False\n while not stop_condition2:\n try:\n number = int(input(message))\n if number < lower_range:\n print(\"Please pic...
[ "0.6210186", "0.60945845", "0.6090262", "0.59687257", "0.5932961", "0.5927148", "0.59168947", "0.58883595", "0.5841568", "0.58019143", "0.5782949", "0.5775905", "0.5761319", "0.5716825", "0.57102144", "0.5705627", "0.5701563", "0.5696017", "0.56239253", "0.5619944", "0.556302...
0.83368486
0
This function returns True if userInput can be converted to a number and returns False if it cannot.
def CheckNumber(userInput): try: float(userInput) return True except(ValueError): return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_number(value_if_allowed):\n if value_if_allowed == '':\n return True\n try:\n float(value_if_allowed)\n return True\n except ValueError:\n return False", "def __checkInput(self, var):\n try:\n int(var)\n\n except:\n return Fals...
[ "0.7273646", "0.710573", "0.70342", "0.700277", "0.6989638", "0.69800496", "0.6931918", "0.68965924", "0.68937594", "0.68852633", "0.6882689", "0.68496823", "0.6842476", "0.6823968", "0.68192196", "0.68040407", "0.679756", "0.6784865", "0.67602575", "0.6758838", "0.673784", ...
0.86746126
0
This function prompts the user for a date using the message variable. User will continue to be prompted until the format is correct. The date format is very specific in the format DD/MM/YYYYY This function will confirm there are the right number of characters, the / are in the right place, the input are numbers, the days are between 1 and 31, the months are between 1 and 12, and the year is between 2000 and 3000 (roll on year 3k bug!)
def DateInput(message): askAgainMessage = "The date must be in the format DD/MM/YYYY" keepAsking = True while keepAsking: answer = input(message) # First we check if there are two / by splitting using / and looking # for 3 items in the returned list. dateCheck = answer.split(sep="/") if len(dateCheck) is not 3: print(askAgainMessage) else: # If all is order, we can assign the 3 items to day, month, year day = dateCheck[0] month = dateCheck[1] year = dateCheck[2] # Next we check each item has the right amount of characters # and they can all be converted into numbers. if (len(day) == 2 and len(month) == 2 and len(year) == 4 and CheckNumber(day) and CheckNumber(month) and CheckNumber(year)): day = int(day) month = int(month) year = int(year) if (day > 0 and day < 32 and month > 0 and month < 13 and year > 2000 and year < 3000): keepAsking = False else: print(askAgainMessage) else: print(askAgainMessage) return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_date(message, param):\n while True:\n try:\n day, month, year = input(message).split(param)\n return str(datetime.datetime(int(year), int(month), int(day)).strftime(\"%d/%m/%Y\"))\n except ValueError:\n continue", "def enter_date...
[ "0.7718624", "0.74047714", "0.7254131", "0.6943424", "0.681888", "0.6775938", "0.6571638", "0.65667284", "0.6505889", "0.6504818", "0.6378474", "0.6291643", "0.6249938", "0.61674076", "0.61129284", "0.61053425", "0.6080972", "0.6056492", "0.60186255", "0.5945199", "0.5924336"...
0.8515413
0
Unicode representation of Match History
def __str__(self): return f"{str(self.team1)} vs {str(self.team2)} on {str(self.date)}"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toString(self) -> unicode:\n ...", "def toString(self) -> unicode:\n ...", "def __str__(self):\n return \"{}\".format(self._matches.keys())", "def unicode(self, irc, msg, args, query):\n url = \"http://unicodelookup.com/lookup?\"\n url = url + urlencode({\"q\": query, \...
[ "0.5806015", "0.5806015", "0.5598331", "0.5412658", "0.53824925", "0.53676164", "0.5249685", "0.5227061", "0.51851416", "0.5137522", "0.5127437", "0.51141953", "0.5091751", "0.50783116", "0.50761265", "0.50514686", "0.5038033", "0.50119203", "0.50102234", "0.4991699", "0.4987...
0.0
-1
takes first row of tworow belief np array and converts it to dict indexed by label of positive beliefs
def np_to_belief(np_array,labels): return dict((l,np_array[0,i]) for i,l in enumerate(labels))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_to_original_labels(array, threshold=0.5, initialization_value=999):\r\n \r\n binarized, belief = get_binarized_and_belief(array=array, threshold=threshold)\r\n \r\n #sanity check\r\n if binarized.shape != belief.shape:\r\n raise ValueError('Sanity check did not pass.')\r\n ...
[ "0.6541998", "0.6310633", "0.5895067", "0.566295", "0.5637139", "0.5578173", "0.55679846", "0.55309737", "0.54985076", "0.5497383", "0.54970396", "0.5491054", "0.5483852", "0.5483852", "0.54637516", "0.54519457", "0.5446611", "0.5395349", "0.53934413", "0.53921664", "0.538463...
0.78097486
0
takes a list of votes and predicts based on threshold returns true iff fraction of true votes >= f
def thresh_vote(lst, f): if len(lst) == 0: # guess 0 by default (appropriate for our dataset) q = 0 else: q = float(sum(lst)) / len(lst) return q >= f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def binary_predict(probs, threshold = 0.5):\n return (probs >= threshold) * np.ones(len(probs))", "def sensitivity(\n targets: List[int], preds: List[float], threshold: float = 0.5\n) -> float:\n return recall(targets, preds, threshold)", "def get_predict(prediction, threshold):\n\n prediction[predic...
[ "0.6548085", "0.6381974", "0.6354342", "0.6264731", "0.6184546", "0.6126238", "0.61081564", "0.60839015", "0.6067138", "0.60335726", "0.5974422", "0.59717226", "0.5970143", "0.59656197", "0.5960877", "0.5954389", "0.59533507", "0.5947933", "0.5928161", "0.5925397", "0.5925221...
0.7472907
0
Takes dictionaries of predicted and ground truth and returns confusion matrix
def confusion_matrix(predicted, gt): tp = [k for k in predicted if predicted[k] and gt[k]] tn = [k for k in predicted if not predicted[k] and not gt[k]] fp = [k for k in predicted if predicted[k] and not gt[k]] fn = [k for k in predicted if not predicted [k] and gt[k]] return tp, tn, fp, fn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_confusion_matrix_intersection_mats(groundtruth, predicted):\n\n confusion_matrix_arrs = {}\n\n groundtruth_inverse = np.logical_not(groundtruth)\n predicted_inverse = np.logical_not(predicted)\n\n confusion_matrix_arrs['tp'] = np.logical_and(groundtruth, predicted)\n confusion_matrix_arrs['t...
[ "0.7215619", "0.71285826", "0.7120823", "0.70471257", "0.70212066", "0.6992223", "0.696946", "0.6948753", "0.6921117", "0.69178545", "0.6913209", "0.69081", "0.68527514", "0.6760738", "0.675917", "0.6750563", "0.6734669", "0.6700748", "0.6682267", "0.6672295", "0.66603005", ...
0.72815716
0
Returns argmax, max of dictionary
def argmax(d): return max(d.iteritems(), key=operator.itemgetter(1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def argMax(self):\n if len(self.keys()) == 0: return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]", "def argMax(self):\n if len(list(self.keys())) == 0:\n return None\n a...
[ "0.7893945", "0.7893012", "0.78599966", "0.7807235", "0.7397014", "0.7194823", "0.7194823", "0.7194823", "0.7170892", "0.7158785", "0.7147766", "0.71402246", "0.71295154", "0.71241313", "0.7121471", "0.7005879", "0.6961847", "0.6961493", "0.69519615", "0.68631655", "0.6839727...
0.83839536
0
Produce nboot bootstrap samples from applying func to data
def bootstrap(data,func,nboot): n = len(data) resamples = np.array([[random.choice(data) for i in range(n)] for j in range(nboot)]) return np.apply_along_axis(func, 1, resamples)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bootstrap(data):\r\n size = int(len(data))\r\n train = resample(data, n_samples=size, replace=True)\r\n test = data.drop(train.index) \r\n return train[encoded_features], train[target], test[encoded_features], test[target]", "def bootstrap_replicate_1d(data, func):\r\n bs_sample = np.rand...
[ "0.7176311", "0.71086556", "0.7089425", "0.7087817", "0.7020718", "0.68657595", "0.6752474", "0.6727999", "0.6657717", "0.63728184", "0.62784743", "0.62304413", "0.62304413", "0.61555386", "0.60460633", "0.6008815", "0.6005979", "0.6003459", "0.59947544", "0.5991532", "0.5978...
0.8077993
0
Split the dataset by features and labels
def split( self, df, iteration_col, episode_col, iteration_order, lagger_str, current_row, feature_cols, label_cols, augmented_cols, ): logger.info( f"Iteration order set to {iteration_order} so using {current_row} from {lagger_str} {iteration_order} row" ) # We group by episode and iteration indices to make dataset episodic df = df.sort_values(by=[episode_col, iteration_col]) # Create a lagged dataframe for capturing inputs and outputs # when iteration_order < 0, this will consist of the features # since we are doing a shift-backwards # when iteration_order > 0, this will consist of labels # since we are doing a shift-forward lagged_df = df.groupby(by=episode_col, as_index=False).shift( iteration_order * -1 ) lagged_df = lagged_df.drop([iteration_col], axis=1) # if iteration order is less than 1 # then the actions, configs should not be lagged # only states should be lagged # features = lagged_df[states] + df[actions, configs] # labels = df[states] if iteration_order < 0: features_df = lagged_df[feature_cols] features_df[augmented_cols] = df[augmented_cols] # if iteration order is greater than 1 # then features = states, actions, configs from current row (df) # labels = states from next row (lagged_df) else: features_df = df[feature_cols] # TODO: check, is this always redundant? 
# i.e., is feature_cols is supset of augmented_cols features_df[augmented_cols] = df[augmented_cols] # eventually we will join the labels_df with the features_df # if any columns are matching then rename them if bool(set(feature_cols) & set(label_cols)): features_df = features_df.rename( columns=lambda x: "prev_" + x if x in label_cols else x ) self.feature_cols = list(features_df.columns.values) self.label_cols = list(label_cols) logger.info(f"Feature columns are: {self.feature_cols}") logger.info(f"Label columns are: {self.label_cols}") # joined_df = df.join(features_df) vars_to_keep = ( [episode_col, iteration_col] + self.feature_cols + self.label_cols ) if iteration_order < 0: labels_df = df[[episode_col, iteration_col] + self.label_cols] else: labels_df = df[[episode_col, iteration_col]].join(lagged_df[self.label_cols]) return labels_df.join(features_df)[vars_to_keep]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_data(data, labels):\r\n # Split the data into train and test\r\n X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.30, random_state = 42)\r\n return(X_train, y_train, X_test, y_test)", "def split_dataset(instances, labels, train_split=0.8):\n split = int(train_sp...
[ "0.7630744", "0.75351566", "0.72341454", "0.72143763", "0.7188007", "0.7188007", "0.71554524", "0.70676637", "0.70484245", "0.6948134", "0.6905886", "0.69033486", "0.68980664", "0.68860745", "0.687833", "0.687833", "0.6874963", "0.68439305", "0.6804945", "0.6803974", "0.67803...
0.0
-1
Read episodic data where each row contains either inputs and its preceding output output or the causal inputs/outputs relationship
def read( self, df: pd.DataFrame, iteration_order: int = -1, episode_col: str = "episode", iteration_col: str = "iteration", feature_cols: List[str] = ["state_x_position"], label_cols: List[str] = ["state_x_position"], augmented_cols: List[str] = ["action_command"], ): # CASE 1: rows are of the form {st+1, at} # Append st into next row # if iteration_order < 0 then drop the iteration - iteration_order iteration from each episode # and append previous state columns into each row: {st+1, at} -> {st, at, st+1} if all([episode_col, iteration_col, iteration_order < 0]): lagger_str = "previous" current_row = "inputs" joined_df = self.split( df, iteration_col, episode_col, iteration_order, lagger_str, current_row, feature_cols, label_cols, augmented_cols, ) # skip the first row of each episode since we do not have its st joined_df = ( joined_df.groupby(by=episode_col, as_index=False) .apply(lambda x: x.iloc[iteration_order * -1 :]) .reset_index() ) return joined_df.drop(["level_0", "level_1"], axis=1) # CASE 2: rows of the form {st, at} # Append st+1 from next row into current row {st, at, st+1} elif all([episode_col, iteration_col, iteration_order > 0]): lagger_str = "next" current_row = "outputs" joined_df = self.split( df, iteration_col, episode_col, iteration_order, lagger_str, current_row, feature_cols, label_cols, augmented_cols, ) # truncate before the end of iteration_order for complete observations only joined_df = ( joined_df.groupby(by=episode_col, as_index=False) .apply(lambda x: x.iloc[: iteration_order * -1]) .reset_index() ) return joined_df.drop(["level_0", "level_1"], axis=1) else: return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_iers_EOP(input_file):\n #-- read data file splitting at line breaks\n with open(input_file,'r') as f:\n file_contents = f.read().splitlines()\n #-- number of data lines\n n_lines = len(file_contents)\n dinput = {}\n dinput['MJD'] = np.zeros((n_lines))\n dinput['x'] = np.zeros((...
[ "0.59410346", "0.567626", "0.5638466", "0.55620134", "0.55526614", "0.5529649", "0.54932714", "0.54829437", "0.5475036", "0.5426845", "0.5419794", "0.5365769", "0.5346987", "0.5330866", "0.5302111", "0.52904403", "0.5289346", "0.527683", "0.5269207", "0.5269175", "0.52437896"...
0.55652803
3
trace finds the line, the filename and error message and returns it to the user
def trace(): import traceback tb = sys.exc_info()[2] tbinfo = traceback.format_tb(tb)[0] # script name + line number line = tbinfo.split(", ")[1] # Get Python syntax error # synerror = traceback.format_exc().splitlines()[-1] return line, __file__, synerror
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trace():\n import traceback, inspect\n tb = sys.exc_info()[2]\n tbinfo = traceback.format_tb(tb)[0]\n filename = inspect.getfile(inspect.currentframe())\n # script name + line number\n line = tbinfo.split(\", \")[1]\n # Get Python syntax error\n #\n synerror = traceback.format_exc()....
[ "0.7392801", "0.7135186", "0.6529018", "0.63906014", "0.6296253", "0.6159555", "0.6074369", "0.6055162", "0.6053701", "0.60512197", "0.60511047", "0.60449284", "0.6025194", "0.5981327", "0.5965538", "0.592666", "0.5902943", "0.5831507", "0.580618", "0.5780158", "0.57784814", ...
0.7310826
1
Validates and ensures output workspace exists
def validate_workspace(wrksp): try: if wrksp.lower().endswith('.gdb') and \ os.path.isdir(wrksp) == False: return arcpy.CreateFileGDB_management(out_folder_path=os.path.dirname(wrksp), out_name=os.path.basename(wrksp))[0] elif wrksp.lower().endswith('.sde') and \ os.path.isfile(wrksp) == False: raise ValueError("SDE workspace must exist before using it.") elif os.path.isdir(wrksp) == False: os.makedirs(wrksp) return wrksp else: return wrksp except: line, filename, synerror = trace() raise FunctionError( { "function": "validate_workspace", "line": line, "filename": filename, "synerror": synerror, "arc" : str(arcpy.GetMessages(2)) } )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_missing_output_workspace(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n config = copy.deepcopy(self.configuration)\n config['output_workspaces'] = {}\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n ...
[ "0.6972508", "0.65746725", "0.65183914", "0.6397043", "0.6383192", "0.6307421", "0.60515106", "0.60397726", "0.5996995", "0.5954663", "0.5945204", "0.59358025", "0.58788586", "0.58496416", "0.58496416", "0.577803", "0.572291", "0.56724334", "0.5628311", "0.56132805", "0.55874...
0.6396854
4
Adds the required columns to the table and appends new records if given.
def extend_table(table, rows=None): try: if rows is None: rows = [] dtypes = np.dtype( [ ('_ID', np.int), ('MEAN_DEF_CNT', np.float64), ('MEDIAN_DEF_CNT', np.int32), ('MIN_DEF_CNT', np.int32), ('MAX_DEF_CNT', np.int32), #STandard deviation ('PRI_NUM_DEF', np.int32), ('SEC_NUM_DEF', np.int32), ('PER_PRI', np.float64), ('PER_SEC', np.float64), ("PRI_ATTR_DEF", '|S20'), # pri_attr ("SEC_ATTR_DEF", '|S20'), ('PRI_ATTR_DEF_PER', np.float64), ('SEC_ATTR_DEF_PER', np.float64), ('FEATURE_CNT', np.int32), ('PRI_ATTR_DEF_CNT', np.float64), ('SEC_ATTR_DEF_CNT', np.float64), ('LC_SCORE', np.int32) ] ) array = np.array(rows, dtypes) da.ExtendTable(table, "OID@", array, "_ID", False) return table except: line, filename, synerror = trace() raise FunctionError( { "function": "extend_table", "line": line, "filename": filename, "synerror": synerror, "arc" : str(arcpy.GetMessages(2)) } )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_cols(self, source) :\n\n cols = source.get_cols()\n types = source.get_types()\n\n new_cols = []\n new_types = []\n for i in range(len(cols)) :\n if cols[i] not in self.cols :\n new_cols.append(cols[i])\n new_types.append(typ...
[ "0.6977524", "0.66601425", "0.6458155", "0.63593054", "0.6348672", "0.63241756", "0.63117015", "0.62366897", "0.62343997", "0.6218119", "0.61731595", "0.61588264", "0.60610455", "0.6052164", "0.6035236", "0.60302246", "0.5991479", "0.59869546", "0.5986548", "0.5966985", "0.58...
0.0
-1
main driver of program
def main(*argv): try: attr_features = argv[0] sql_clause = argv[1] polygon_grid = argv[2] error_field_count = str(argv[3]) #'NULL_COUNT'# error_field_def = str(argv[4]) #'NULL_COLUMNS'# output_fc = argv[5] out_fc_exists = arcpy.Exists(output_fc) # Local Variable # scratchFolder = env.scratchFolder scratchGDB = env.scratchGDB results = [] # Logic # if not out_fc_exists: output_gdb = validate_workspace(os.path.dirname(output_fc)) # Create the grid # out_grid = arcpy.CopyFeatures_management(polygon_grid, output_fc)[0] out_grid = extend_table(out_grid) where_clause=None else: arcpy.MakeFeatureLayer_management(output_fc, "lyr") arcpy.SelectLayerByLocation_management("lyr", "HAVE_THEIR_CENTER_IN", polygon_grid) oids = [row[0] for row in arcpy.da.SearchCursor("lyr", "OID@")] if len(oids) >1: oids_string = str(tuple(oids)) else: oids_string = str('('+ str(oids[0]) + ')') where_clause = 'OBJECTID IN ' + oids_string error_field = (error_field_def, error_field_count) # Process the Data # poly_desc = arcpy.Describe(output_fc) fc_desc = arcpy.Describe(attr_features) if poly_desc.extent.within(fc_desc.extent): temp_fc = 'in_memory/clip' arcpy.AddMessage('Clipping features to polygon') arcpy.Clip_analysis(attr_features, output_fc, temp_fc) arcpy.AddMessage('Created in_memory fc') #data_sdf = geomotion.SpatialDataFrame.from_featureclass(temp_fc, # fields=[value_field]) if sql_clause: attr_sdf = SpatialDataFrame.from_featureclass(temp_fc, fields=error_field, where_clause=sql_clause) else: attr_sdf = SpatialDataFrame.from_featureclass(temp_fc, fields=error_field) arcpy.AddMessage('features read into spatial dataframe after clipping') else: #data_sdf = geomotion.SpatialDataFrame.from_featureclass(, fields=[value_field]) arcpy.AddMessage('features read into spatial dataframe without clipping') if sql_clause: attr_sdf = SpatialDataFrame.from_featureclass(attr_features, fields=error_field, where_clause=sql_clause) else: attr_sdf = SpatialDataFrame.from_featureclass(attr_features, 
fields=error_field) grid_sdf = SpatialDataFrame.from_featureclass(filename=output_fc, where_clause=where_clause) index = attr_sdf.sindex for idx, row in enumerate(grid_sdf.iterrows()): errors = [] attrs = [] geom = row[1].SHAPE oid = row[1].OBJECTID print(str(oid)) ext = [geom.extent.lowerLeft.X, geom.extent.lowerLeft.Y, geom.extent.upperRight.X, geom.extent.upperRight.Y] row_oids = list(index.intersect(ext)) df_current = attr_sdf.loc[row_oids]#.copy() sq = df_current.geometry.disjoint(geom) == False fcount = len(df_current[sq]) # Total Count q2 = df_current[error_field_count] > 0 #& q2 df_current = df_current[sq].copy() # Get the # of features with deficiency_cnt > 0 #print("here") if fcount>0: #len(df_current) > 0: errors += df_current[error_field_count].tolist() arcpy.AddMessage(str(errors)) def process(x): print(x) return [va for va in x.replace(' ', '').split('|')[-1].split(',') if len(va) > 1] for e in df_current[error_field_def].apply(process).tolist(): attrs += e del e row = get_answers(oid=oid, err=errors, attr=attrs, feature_count=fcount) results.append(row) if len(results) > 250: extend_table(table=output_fc, rows=results) results = [] del idx del row del errors del attrs del geom del oid del ext del row_oids del df_current del sq del q2 if len(results) > 0: extend_table(table=output_fc, rows=results) del index del results del grid_sdf del attr_sdf except arcpy.ExecuteError: line, filename, synerror = trace() arcpy.AddError("error on line: %s" % line) arcpy.AddError("error in file name: %s" % filename) arcpy.AddError("with error message: %s" % synerror) arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2)) except FunctionError as f_e: messages = f_e.args[0] arcpy.AddError("error in function: %s" % messages["function"]) arcpy.AddError("error on line: %s" % messages["line"]) arcpy.AddError("error in file name: %s" % messages["filename"]) arcpy.AddError("with error message: %s" % messages["synerror"]) arcpy.AddError("ArcPy Error Message: %s" % 
messages["arc"]) except: line, filename, synerror = trace() arcpy.AddError("error on line: %s" % line) arcpy.AddError("error in file name: %s" % filename) arcpy.AddError("with error message: %s" % synerror)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n pass", "def main():\n run_program()", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main()...
[ "0.8577987", "0.85386586", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", ...
0.0
-1
The constructor of the class. Here you will need to create the attributes ("instance variables") that were described in the docstring. Note that some of the attributes are defined by parameters passed to this constructor method, but others are not.
def __init__(self, name, full_name, team, eye_color, hair_color, base): self.name = name self.full_name = full_name self.team = team self.eye_color = eye_color self.hair_color = hair_color self.base = base self.powers = [] self.nemeses = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, **kwds):\n raise NotImplementedError", "def __init__(self) -> None:\n # TODO: Provide the complete constructor for this object", "def __init__(self, *args, **kwargs) -> None:\n pass", "def __init__(self, *args, **kwargs) -> None:\n pass", "def __init__(self, *...
[ "0.795267", "0.78457487", "0.7731138", "0.7731138", "0.7643051", "0.76117766", "0.7513325", "0.7513325", "0.7513325", "0.7513325", "0.7501279", "0.7501279", "0.7501279", "0.7375516", "0.73525584", "0.7352199", "0.73369604", "0.7306995", "0.7306995", "0.7306995", "0.7300619", ...
0.0
-1
This is the string method for the class. Whenever an instance of is passed to the str() or print() functions, the return string from this method will be returned. Fill in the instance attributes that are outlined by the characters in the variable.
def __str__(self): description = f"{self.name} is a member of the {self.team} and possesses the following powers:\n{self.powers}" return description
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n # print(self.get_string())\n return self.get_string()", "def __str__(self):\n # print(self.get_string())\n return self.get_string()", "def __str__(self):\n return self.printable()", "def __str__(self):\n if self.f_has_range():\n lenstr...
[ "0.77815866", "0.77815866", "0.7584849", "0.74873406", "0.7468666", "0.74566126", "0.7438483", "0.7398238", "0.73615706", "0.7347233", "0.7347233", "0.7347233", "0.7347233", "0.7336336", "0.7325533", "0.7299921", "0.7297643", "0.7283104", "0.72561014", "0.72357535", "0.723286...
0.0
-1
This method will modify the attribute by appending the parameter to it.
def add_power(self, power): self.powers.append(power)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __setattr__(self, name, value):\n if name in ['parameters', 'program_name']: # Allowed attributes\n self.__dict__[name] = value\n else:\n self.set_parameter(name, value) # treat as a parameter", "def put_param(self, attr_name, val):\n self._params[attr_name] = val"...
[ "0.6987088", "0.6885637", "0.6829756", "0.6731595", "0.67283607", "0.67017716", "0.66079473", "0.6570364", "0.6501009", "0.64624107", "0.6447641", "0.64455295", "0.64400303", "0.64285815", "0.6388802", "0.6383218", "0.637725", "0.63599354", "0.63599354", "0.63599354", "0.6348...
0.0
-1
This method will modify the attribute by appending the parameter to it.
def add_nemesis(self, nemesis): self.nemeses.append(nemesis)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __setattr__(self, name, value):\n if name in ['parameters', 'program_name']: # Allowed attributes\n self.__dict__[name] = value\n else:\n self.set_parameter(name, value) # treat as a parameter", "def put_param(self, attr_name, val):\n self._params[attr_name] = val"...
[ "0.6987088", "0.6885637", "0.6829756", "0.6731595", "0.67283607", "0.67017716", "0.66079473", "0.6570364", "0.6501009", "0.64624107", "0.6447641", "0.64455295", "0.64400303", "0.64285815", "0.6388802", "0.6383218", "0.637725", "0.63599354", "0.63599354", "0.63599354", "0.6348...
0.0
-1
THIS SHOULD LOOK FAMILIAR, SO WE PROVIDED IT FOR YOU. This function reads a .csv file and parses it into a list of dictionaries, where each dictionary is formed from the data on one line of the .csv file. This function takes one argument , which is the path of the file to be read. You will need to use the "csv" module in this function.
def read_csv_file(input_filepath): out_list = [] with open(input_filepath, 'r', encoding = 'utf-8') as f: reader = csv.reader(f) for i,row in enumerate(reader): if i == 0: labels = row else: new_dict = {} for j,value in enumerate(row): new_dict[labels[j]] = value out_list.append(new_dict) return out_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv_as_dicts(csv_input_file_name):\n input_table = read_csv_as_table(csv_input_file_name, skip_first_line=False)\n\n # first line should contain headers\n header = input_table[0]\n # rest lines would contain actual data\n data = input_table[1:]\n\n output = []\n # process all lines wi...
[ "0.7621891", "0.7575346", "0.74887645", "0.7393947", "0.7317759", "0.73076624", "0.7262524", "0.72616434", "0.71987087", "0.71802855", "0.71753365", "0.7114956", "0.7059726", "0.70039433", "0.6988675", "0.6970228", "0.6970228", "0.6946911", "0.694326", "0.6926613", "0.690651"...
0.67380226
40
This function takes any , converts it into a string via the str() function (if possible), and then writes it to a file located at . Note that this function is a general writing function. It should not use the .csv module, but rather the python write() function. This is because we want this function to write ANYTHING we give it as the parameter (in the case of this assignement, you will actually use it to write string representations of the class instances you create).
def write_to_file(filepath, data): with open(filepath, 'w') as f: f.write(str(data))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writeToCSV(self, filepath):\r\n\t\twith open(filepath, 'w') as outputFile:\r\n\t\t\toutputFile.write(str(self))", "def export_to_file(data, filename='class_data.txt', mode='a'):\n with open (filename, mode) as f:\n if mode == \"w\":\n for record in data:\n line = \",\".joi...
[ "0.6876169", "0.6498835", "0.63612753", "0.6353106", "0.633464", "0.62842685", "0.61852163", "0.60895437", "0.6051628", "0.6050783", "0.60384357", "0.60274184", "0.59963906", "0.5985917", "0.59784335", "0.5975056", "0.596307", "0.59575045", "0.59389687", "0.5937647", "0.59298...
0.5933413
20
In this function, you will instantiate several times, given the data provided. Then, you will open "sh_additional_info.csv" and for each line in that file, perform an operation using one of the methods of one of your classes. Follow the commented instructions in this main() function. Refer to Problem Set 07 README.md for instructions and tips.
def main(): # Refer to Problem Set 07 README.md for instructions and tips. # 6.1: Read in < sh_basic_info.csv > basic_info = read_csv_file('sh_basic_info.csv') # 6.2: Create instances of < SuperHeroine > heroines = {} for hero in basic_info: heroines[hero['name']] = SuperHeroine(hero['name'], hero['full_name'], hero['team'], hero['eye_color'], hero['hair_color'], hero['base']) print(heroines) # 6.3: Read in < sh_additional_info.csv > additional_info = read_csv_file('sh_additional_info.csv') # 6.4: Add powers and nemesis for row in additional_info: name = row["Heroine Name"] instance_affected = heroines[name] how_affected = row["Category"] value = row['Value'] if how_affected == 'power': instance_affected.add_power(value) else: instance_affected.add_nemesis(value) # 6.5: Write to file write_to_file('storm.txt',heroines['Storm']) write_to_file('scarlet_witch.txt',heroines['Scarlet Witch']) write_to_file('jessica_jones.txt',heroines['Jessica Jones'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n raw_data = pd.read_csv('data/raw_hospital_data.csv')\n\n fe_data = new_features(raw_data)\n fe_data = compressing_admission_type(data)\n fe_data = age_to_cat(fe_data)\n fe_data = compressing_careunit(fe_data)\n fe_data = compressing_curr_serv(fe_data)\n fe_data = compressing_ethn...
[ "0.6210108", "0.60456717", "0.60032433", "0.5947413", "0.5876277", "0.58508295", "0.58461136", "0.5834677", "0.57999545", "0.57364744", "0.5730089", "0.57000756", "0.5691697", "0.5639754", "0.5626317", "0.56223726", "0.55977935", "0.55886185", "0.5551414", "0.55287653", "0.55...
0.6953013
0
Run a raw GraphQL query
def query(output, query): gqlapi = gql.get_api() print_output(output, gqlapi.query(query))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_graphql(raw_query, endpoint):\n query = \" \".join(shlex.split(raw_query, posix=False))\n r = requests.get(endpoint, params={\"query\": query})\n if r.status_code == 200:\n return r.json()\n elif r.status_code == 400:\n response = r.json()\n assert \"errors\" in response\...
[ "0.7544899", "0.68699944", "0.6803751", "0.67968833", "0.661308", "0.65708804", "0.65654963", "0.6560553", "0.65150213", "0.65003735", "0.6493559", "0.64276546", "0.6414967", "0.63659316", "0.63526833", "0.63463426", "0.63447905", "0.6304541", "0.6291603", "0.629144", "0.6272...
0.6531391
8
passing in event_loop helps avoid 'attached to a different loop' error
def test_app(event_loop): app.finalize() app.conf.store = "memory://" app.flow_control.resume() return app
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def event_loop(self):\n logging.warning('loop undefined')", "def dispatch_loop(self):\n pass", "def check_event_loop():\n loop = asyncio.get_event_loop()\n if loop.is_closed():\n asyncio.set_event_loop(asyncio.new_event_loop())", "def _handle_loop(self):\n pass", "...
[ "0.81281793", "0.7433964", "0.71915084", "0.71593684", "0.7092627", "0.70529735", "0.70341283", "0.70070803", "0.69741666", "0.69741666", "0.69633144", "0.69001657", "0.6889111", "0.6859079", "0.6846811", "0.6844431", "0.68335426", "0.68281376", "0.6722399", "0.6682619", "0.6...
0.0
-1
Updates x, y (memoryshared) coordinates with actual mouse position with a given frequency.
def stream(bus, address, frequency, x, y, stop_trigger): mouse = Mouse.list_connected(bus=bus, address=address)[0] delay = 1./frequency while not stop_trigger: x1, y1 = mouse.get_position_change() x.value += x1 y.value += y1 time.sleep(delay)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def follow(self):\n\t\tpos = pygame.mouse.get_pos()\n\t\tself.x = pos[0]\n\t\tself.y = pos[1]\n\t\tself.draw()", "def mouse_position_event(self, x: int, y: int):\n pass", "def update_pointer(self):\n pointer_length = -self.pointer_frac * self.radius\n # Add pi/2 to the angle because we con...
[ "0.5839249", "0.57032055", "0.5696799", "0.56878513", "0.5680848", "0.56477594", "0.5643513", "0.56162447", "0.5613641", "0.56051666", "0.558602", "0.55699193", "0.5541605", "0.5541605", "0.5534065", "0.5533534", "0.5528136", "0.5512954", "0.5495194", "0.54931223", "0.548661"...
0.6736627
0
Returns the focal length of the telescope.
def focal_length(self): return self.f * self.diameter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def estimate_focal_length(self):\n fl = (self.fiber_diameter / 2) / np.tan(np.deg2rad(self.fov / 2))\n\n return fl", "def length(self) -> ir.FloatingValue:\n return ops.GeoLength(self).to_expr()", "def bspb_focalLength():\n shotCam = pm.PyNode('shot_cam').getShape()\n return str(shot...
[ "0.7237808", "0.6786899", "0.6465912", "0.63727885", "0.6352939", "0.62659967", "0.61646885", "0.616444", "0.61160403", "0.6020924", "0.5934602", "0.5932981", "0.59197545", "0.5891779", "0.588042", "0.58679926", "0.584611", "0.58384174", "0.58230686", "0.5822641", "0.5822641"...
0.8241378
0