query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Update parameters using one step of gradient descent
def update_parameters_with_gd(parameters, grads, learning_rate): L = len(parameters) // 2 # number of layers in the neural networks # Update rule for each parameter for l in range(L): parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * grads['dW' + str(l+1)] parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * grads['db' + str(l+1)] return parameters
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_parameters(self, loss):\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()", "def updateParams(self,gradients):\n for i in xrange(len(self.params)):\n self.params[i].set_value(self.params[i].get_value()-gradients[i]/(1/self.learning_rate+self...
[ "0.7269349", "0.7062195", "0.7061314", "0.7021435", "0.6942396", "0.693245", "0.69282365", "0.6900024", "0.68978393", "0.68952733", "0.6880095", "0.6862448", "0.68579733", "0.6851211", "0.6844264", "0.68293726", "0.6826274", "0.67916423", "0.67859596", "0.67605203", "0.675119...
0.0
-1
Creates a list of random minibatches from (X, Y)
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0): np.random.seed(seed) # To make your "random" minibatches the same as ours m = X.shape[1] # number of training examples mini_batches = [] # Step 1: Shuffle (X, Y) permutation = list(np.random.permutation(m)) shuffled_X = X[:, permutation] shuffled_Y = Y[:, permutation].reshape((1,m)) # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case. num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning for k in range(0, num_complete_minibatches): mini_batch_X = shuffled_X[:, k*mini_batch_size:(k+1)*mini_batch_size] mini_batch_Y = shuffled_Y[:, k*mini_batch_size:(k+1)*mini_batch_size] mini_batch = (mini_batch_X, mini_batch_Y) mini_batches.append(mini_batch) # Handling the end case (last mini-batch < mini_batch_size) if m % mini_batch_size != 0: mini_batch_X = shuffled_X[:, num_complete_minibatches*mini_batch_size:] mini_batch_Y = shuffled_X[:, num_complete_minibatches*mini_batch_size:] mini_batch = (mini_batch_X, mini_batch_Y) mini_batches.append(mini_batch) return mini_batches
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_mini_batches(X, Y, mini_batch_size = 64):\n\n\n\tm = X.shape[1]\n\tmini_batches = []\n\n\t#Shuffling around the data randomly according to the 'permutation' list\n\tpermutation = list(np.random.permutation(m))\n\tshuffled_X = X[:, permutation]\n\tshuffled_Y = Y[:, permutation].reshape((1,m))\n\n\tcomple...
[ "0.65385675", "0.6229042", "0.6218775", "0.62119395", "0.62032425", "0.61461645", "0.6137679", "0.60998034", "0.6097758", "0.60959524", "0.60723096", "0.60708946", "0.6014316", "0.5990937", "0.5940285", "0.5884059", "0.5871586", "0.5844728", "0.58423215", "0.5820612", "0.5779...
0.6306558
1
Yield successive nsized chunks from l.
def chunks(l, n): for i in range(0, len(l), n): yield l[i:i+n]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _chunk(self, l, n):\n for i in range(0, len(l) + 1, n):\n yield l[i:i + n]", "def chunks(self, l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def __chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def get_chunks(self, ...
[ "0.8038813", "0.79248375", "0.7923423", "0.7885103", "0.78773195", "0.7815877", "0.77655786", "0.77556044", "0.77441615", "0.7731815", "0.77288336", "0.772473", "0.77028215", "0.76889825", "0.76889825", "0.7664208", "0.76570904", "0.7655856", "0.7655856", "0.7635831", "0.7547...
0.76458347
21
Tests that Predictor instances are not serializable.
def test_serialization(): # Class is serializable. ray.put(DummyPredictor) # Instance is not serializable. predictor = DummyPredictor() with pytest.raises(PredictorNotSerializableException): ray.put(predictor)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_deserialize_bad_data(self):\n data = \"this is not a dictionary\"\n recommendation = Recommendation()\n self.assertRaises(DataValidationError, recommendation.deserialize, data)", "def test_valid_serialization_unfit_model(self):\n instance = GammaUnivariate()\n result =...
[ "0.58206624", "0.58120334", "0.5762754", "0.57272524", "0.56799954", "0.56695384", "0.56055886", "0.5602577", "0.5546996", "0.5540443", "0.5539814", "0.55207306", "0.54866004", "0.5472597", "0.5461503", "0.5412684", "0.53859967", "0.537395", "0.535977", "0.53534746", "0.53437...
0.7470269
0
Fit model to current data batch + previous data batch
def fit(self, x, y, logger): history = self.model1.fit(x=x, y=y, batch_size=self.batch_size, epochs=self.epochs) logger.log({'ValFuncLoss': history.history['loss'][-1]})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit(self, data):\n\n \"\"\"YOUR CODE HERE \"\"\"\n # unormalized data\n un_st = np.concatenate([datum[\"observations\"] for datum in data])\n un_stp1 = np.concatenate([datum[\"next_observations\"] for datum in data])\n un_at = np.concatenate([datum[\"actions\"] for datum in d...
[ "0.7298221", "0.7222674", "0.71828973", "0.7077382", "0.7039742", "0.7018279", "0.69872963", "0.6833547", "0.68039864", "0.6773409", "0.6749072", "0.6726586", "0.6716468", "0.6715232", "0.6707798", "0.6707481", "0.6704683", "0.668996", "0.668996", "0.668996", "0.668996", "0...
0.6526704
53
Adds player calendar to team_cal and returns filled teams
def match_with_player(self, name, player_cal): updated_team_cal = self.team_cal.copy() filled_team_keys = [] for loc in player_cal.stack().index: current_player_count = self.team_cal.at[loc] if self.price_cal.at[loc] <= player_cal.at[loc]: if current_player_count < self.team_size * 2: updated_team_cal.at[loc] += 1 self.team_dict[f'{loc[1]}-{loc[0]}'].append(name) if current_player_count == self.team_size * 2 - 1: filled_team_keys.append(f'{loc[1]}-{loc[0]}') else: continue # team is filled self.team_cal = updated_team_cal return filled_team_keys
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_teams():", "def _create_teams(self):\n\t\tself.teamsDict = {}\n\t\tself.teamNamesList = []\n\t\tfor team in range(self.numberOfTeams):\n\t\t\tname = 'TEAM_'+str(team+1)\n\t\t\tself.teamNamesList.append(name)\n\t\t\tself.teamsDict[name] = app.game.team.Team(sport_type=self.gameData['sportType'])", "def ...
[ "0.6536402", "0.59650755", "0.5887284", "0.58515286", "0.5829555", "0.579289", "0.5745127", "0.57173425", "0.57036144", "0.56878537", "0.56028706", "0.55922323", "0.5529244", "0.5521643", "0.54728687", "0.54638255", "0.54560107", "0.5452628", "0.5441508", "0.5441441", "0.5440...
0.6215539
1
Sort players by score and alternate team picks
def make_teams(players, timeslot): player_list_with_scores = [] for name in players: player = pickle.loads(playersdb.get(name)) # while we have player object loaded, set game timeslot for player player['games'].append(timeslot) playersdb.set(name, pickle.dumps(player)) player_list_with_scores.append((name, player['score'])) player_list_with_scores.sort(key=lambda tup: tup[1], reverse=True) # sort by score teamA = [p[0] for p in player_list_with_scores[::2]] teamB = [p[0] for p in player_list_with_scores[1::2]] return teamA, teamB
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort(self):\r\n\t\tif ScoreOpt.isGroupVassals():\r\n\t\t\tself._playerScores.sort(lambda x, y: cmp(x.sortKey(), y.sortKey()))\r\n\t\t\tself._playerScores.reverse()\r\n\t\tmaxPlayers = ScoreOpt.getMaxPlayers()\r\n\t\tif maxPlayers > 0 and len(self._playerScores) > maxPlayers:\r\n\t\t\tself._playerScores = self....
[ "0.77570707", "0.6806551", "0.6747068", "0.6733204", "0.66566753", "0.6630154", "0.65957487", "0.6527466", "0.64854187", "0.64108056", "0.64096063", "0.6390266", "0.6356324", "0.63238096", "0.6280292", "0.6257902", "0.6251791", "0.6247992", "0.62457335", "0.62404174", "0.6236...
0.5901569
39
Sends message to RabbitMQ exchange
def send_message(msg, exchange, key=None): print(msg) connection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbitmq')) channel = connection.channel() exchange_type = 'direct' if exchange == 'other' else 'topic' channel.exchange_declare(exchange=exchange, exchange_type=exchange_type) if key is not None and exchange == 'logs': routing_key = f'scheduler.{key}' else: routing_key = '' channel.basic_publish(exchange=exchange, routing_key=routing_key, body=msg) connection.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_rabbit_message (params ):\n print \"sending message to rabbitmq exchange\"\n logging.basicConfig()\n rabbitmq_host = params.get( 'host' )\n rabbitmq_port = params.get( 'port' )\n rabbitmq_username = params.get( 'user-name' )\n rabbitmq_password = params.get( 'password' )\n exchange_na...
[ "0.7955153", "0.73282826", "0.69862974", "0.6899452", "0.68242073", "0.676501", "0.6764902", "0.6729278", "0.669033", "0.668374", "0.66331697", "0.66054446", "0.65912455", "0.6582951", "0.6516775", "0.64865685", "0.6484264", "0.64665216", "0.64554477", "0.6451518", "0.643939"...
0.75768226
1
The Slack Real Time Messaging API is an events firehose. this parsing function returns None unless a message is directed at the Bot, based on its ID.
def parse_slack_output(slack_rtm_output, bot): output_list = slack_rtm_output if output_list and len(output_list) > 0: for output in output_list: at_bot = "<@%s>" % bot['bot_id'] if output and 'text' in output and at_bot in output['text'] and 'channel' in output: # return text after the @ mention, whitespace removed return output['text'].split(at_bot)[1].strip(), \ output['channel'] return None, None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_bot_commands(slack_events):\n\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n print event[\"text\"]\n user_id, message = parse_direct_mention(event[\"text\"])\n if user_id == starterbot_id:\n return...
[ "0.66511947", "0.66095793", "0.6603378", "0.6577621", "0.6566526", "0.6535629", "0.6535629", "0.6535629", "0.6493531", "0.6481132", "0.6459471", "0.6458375", "0.6428089", "0.63861305", "0.63535863", "0.63366276", "0.61743027", "0.6160538", "0.6147883", "0.6101927", "0.6010351...
0.56720054
28
Import, overwrite fixtures from `[app]/fixtures`
def sync_dashboards(app=None): if not cint(frappe.db.get_single_value('System Settings', 'setup_complete')): return if app: apps = [app] else: apps = frappe.get_installed_apps() for app_name in apps: print("Updating Dashboard for {app}".format(app=app_name)) for module_name in frappe.local.app_modules.get(app_name) or []: frappe.flags.in_import = True make_records_in_module(app_name, module_name) frappe.flags.in_import = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fixture_example_data():\n import_example_data()", "def fixtures():", "def setUp(self):\n self.fixtures_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"fixtures/\"\n )", "def populate_fixtures():\n languages()\n words()", "def load_fixtures(self):\n...
[ "0.7610767", "0.71214706", "0.70407146", "0.6833973", "0.6788867", "0.6743615", "0.6529511", "0.650409", "0.64089555", "0.6364059", "0.6342745", "0.6301757", "0.6232977", "0.6109557", "0.61017966", "0.6088769", "0.58966875", "0.5851127", "0.5831919", "0.58071595", "0.57650894...
0.0
-1
Used with optparser for multiple arguments of the same type.
def key_callback(option,opt_str,value,parser): if "--epi-key" in opt_str: parser.values.epi_keys.append(value) elif "--exclude" in opt_str: parser.values.exclude_paths.append(value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Args(parser):", "def consume_options(cls, data, hittype, args):\n opt_position = 0\n data[\"t\"] = hittype # integrate hit type parameter\n if hittype in cls.option_sequence:\n for expected_type, optname in cls.option_sequence[hittype]:\n if opt_position < len(...
[ "0.66007906", "0.6587666", "0.6514029", "0.6472643", "0.64082843", "0.6338237", "0.6190322", "0.6178333", "0.61771864", "0.61676735", "0.61545366", "0.6126562", "0.61167157", "0.60940254", "0.60646427", "0.605548", "0.60479504", "0.60187566", "0.5961234", "0.59607315", "0.594...
0.0
-1
Email summary of results to user.
def EmailResults(recipient, error_mesg, topdir, dumpfile, logfile, motcor_summary): #********************************************************************************* if recipient is None: return elif 'noname' in recipient: return sender = 'preprocess' if 'Abnormal' in error_mesg > 0: subject = 'Problem while preprocessing %s' % topdir else: subject = 'Preprocessing complete for %s' % topdir mssg = error_mesg if logfile is not None and isinstance(logfile, str): f = open(logfile, 'r') lines = f.readlines() f.close() logged_errors = '' for i in xrange(len(lines)): if 'rror' in lines[i]: mssg += ''.join(lines[i-1:]) break mssg += motcor_summary if dumpfile is not None: f = open(dumpfile,'r') mssg += '\nSummary of processing:\n' mssg += f.read() f.close() send_email(recipient, subject, mssg, sender)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _show_summary(self):\n print 'Summary:'\n print ' Reports downloaded successfully: %d' % self.counts\n print ' Reports not downloaded: %d\\n' % self.failed", "def publish_summary(self, jobs):\n pass", "def report(self, results):\n self.notice(\"Test Report\\n\")\n\n ...
[ "0.6340017", "0.6205421", "0.6195181", "0.61352056", "0.60605377", "0.60505825", "0.5933892", "0.5929879", "0.5912047", "0.57762545", "0.57740766", "0.57343775", "0.56606674", "0.56550497", "0.5646071", "0.56351393", "0.5634105", "0.5610652", "0.5604074", "0.56035924", "0.559...
0.648858
0
Walk through directories and categorize the data. This method builds the "info" attribute a dictionary that characterizes each data series and defines the options and input/output filenames for each stage of processing.
def FindStuffToDo(self): while self.topdir.endswith('/'): self.topdir = self.topdir[:-1] if hasattr(self, 'LogProcess'): self.LogProcess() # Look for data to process. self.WalkPath(self.topdir) if os.path.islink('%s/anatomicals' % self.topdir): # os.walk won't follow links, so do this one manually. if not os.path.exists('%s/dicoms' % self.topdir): # Don't do a duplicate search. pathname = os.readlink('%s/anatomicals' % self.topdir) self.WalkPath(pathname) # Pair-up fieldmaps with EPI's self._SetFmapInfo() # Pair fieldmaps with strucural images. self._SetAnatTgts() # Assocate a ref.dat file with each EPI. self._GetRefdat() self._MakeEpiScratchDir() # Order the EPIs so the names are correct. self._GetEpiOrder() # Associate each EPI with an anatomical, determine if it was # acquired before or after the epi self._SetBaseEpi() self.motcor_summary = self.SummarizeMotionTargets() f = open('%s/motion_corr.txt' % self.logdir, 'w') f.write(self.motcor_summary) f.close() if self.verbose: print self.motcor_summary
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_data_dir(self, data_dir):\n categories = os.listdir(data_dir)\n for folder_name in categories:\n all_fnames_list_fname = os.path.join(data_dir, folder_name,\n folder_name + \".bmf\")\n if not os.path.isfile(all_fnames_li...
[ "0.589833", "0.58914953", "0.58123064", "0.58081996", "0.57288224", "0.56694525", "0.56223905", "0.56043434", "0.5600391", "0.55908597", "0.5513597", "0.5498663", "0.54749286", "0.5472291", "0.54717815", "0.54624385", "0.54597956", "0.5453633", "0.544727", "0.54446405", "0.54...
0.0
-1
Create directory or exit on error.
def MakeDir(self, dirname): if os.path.exists(dirname): return try: os.umask(UMASK_DIR) os.makedirs(dirname) except OSError: self.errors = True errstr = '\nCould not create directory: %s ... ' % dirname self.LogErrors(errstr) raise OSError(errstr) os.umask(UMASK_FILE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_dir(self):\n if not os.path.exists(self.d):\n try:\n os.mkdir(self.d)\n except OSError, e:\n if e.errno != 17:\n raise\n pass", "def make_dir(path=None):\n\n if not os.path.exists(path):\n try:\n ...
[ "0.7861626", "0.77017003", "0.7609223", "0.7593223", "0.75566745", "0.75102687", "0.75102687", "0.7497127", "0.7463986", "0.74487466", "0.74413145", "0.74352056", "0.74352056", "0.74352056", "0.74352056", "0.742612", "0.74215883", "0.7420352", "0.74127334", "0.74027", "0.7396...
0.0
-1
Fill in a heirarcy of template files. The default template file is first loaded. Then entries are overwritten by entries in the studylevel template (in the directory containing each subjectlevel directories). Finally, entries in the subjectlevel template are loaded.
def _FindTemplateFile(self, topdir): if topdir.endswith('..'): topdir = '/'.join(topdir.split('/')[:-2]) fnames = os.listdir(topdir) for fname in fnames: filename = '%s/%s' % (topdir, fname) if filename.endswith('.yaml') and not os.path.isdir(filename) and \ os.path.exists(filename): f = open(filename, 'r') magic_code = f.read(22) f.close() if '#!fmri_file_template' in magic_code: return filename return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __fill_all_templates__(self,configs):\n template_dir = configs['system'].get('Common_directories','template')\n sample_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','sample'))\n system_template = os.path.join(template_dir,configs['pipeline'].get('Template_fi...
[ "0.68409294", "0.644731", "0.6447111", "0.6338145", "0.62998015", "0.6060812", "0.5989269", "0.58991736", "0.5831137", "0.5817373", "0.57764006", "0.57094055", "0.56525546", "0.5633865", "0.56115997", "0.5558664", "0.5556968", "0.5553622", "0.5548341", "0.55309093", "0.550637...
0.0
-1
Read a single template file and return the resulting dict object.
def _LoadTemplate(self,fname): f = open(fname, 'r') lines = f.readlines() data = '' for line in lines: if not line.startswith('---'): data += line data = data.replace('\t',' ') if '\t' in data: errstr = \ 'Illegal tabs encountered in template file. Use spaces instead.' raise ScannerError(errstr) proc.LogErrors(errstr) tmplt = yaml.load(data) f.close() return tmplt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_template_file(self):\n try:\n return json.loads(open(self.TEMPLATE_FILE,'r').read())\n except FileNotFoundError:\n sys.stdout.write(\"Template-file does not exist.\\n\")\n except OSError as e:\n sys.stdout.write(\n \"Error: \\'{}\\' occ...
[ "0.7782369", "0.76745313", "0.7547481", "0.74933225", "0.74301606", "0.738433", "0.7244087", "0.7218751", "0.71575886", "0.6929455", "0.69022644", "0.6875117", "0.68383133", "0.67652184", "0.659829", "0.65947074", "0.65571237", "0.6519493", "0.64934295", "0.649163", "0.648591...
0.64322484
23
Load the hierarchy of templates.
def _GetTemplate(self): # First read default template. tmplt = self._LoadTemplate(c.preproc_template_default) tmplt['proc'] = self.topdir self.template_type = 'default' self.templates = [] if self.template_file is not None: tmplt.update(self._LoadTemplate(self.template_file)) self.template_type = 'command-line' self.templates.append(os.path.abspath(self.template_file)) found_template = True else: # Find a study specific template file. study_template_file = self._FindTemplateFile('%s/..' % self.topdir) if study_template_file is not None: # Merge study template into default, study template has precedence. if self.verbose: print "Using study template at " + study_template_file tmplt.update(self._LoadTemplate(study_template_file)) self.template_type = 'study-specific' self.templates.append(os.path.abspath(study_template_file)) found_template = True else: found_template = False # Now look for a subject-specific template file. subject_template_file = self._FindTemplateFile('%s' % self.topdir) if subject_template_file is not None: # Merge subject template, subject template has precedence. if self.verbose: print "Using subject-specific template at %s" % \ subject_template_file tmplt.update(self._LoadTemplate(subject_template_file)) self.template_type = 'study-specific' self.templates.append(os.path.abspath(subject_template_file)) found_template = True if not found_template: raise RuntimeError('Could not find template file.') if tmplt.get('subject','same') == 'same': # Default subdirectory is same as data directory. tmplt['subject'] = self.topdir.split('/')[-1] else: if not isinstance(tmplt['subject'],str): errstr = 'preprocess: Invalid subject number. Be sure to ' + \ 'enclose the subject number item with double quotes.' raise RuntimeError(errstr) # Keys that apply to all EPIs. self.fsl_flip = tmplt.get('fsl_flip', False) if self.fsl_flip: self.flip_opts = '-LT' else: self.flip_opts = '' # Replace strings with python types. 
for key in tmplt.keys(): if tmplt[key] == 'None': tmplt[key] = None elif key == 'True': tmplt[key] = True elif key == 'False': tmplt[key] = False return tmplt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_templates(cls):\n if cls._raw_templates is None:\n cls._raw_templates = fetch_rrlyrae_templates()", "def load_templates(self):\n TemplateHandler.templates = []\n for template in os.listdir(TemplateHandler.templates_path):\n template_config = self.load_template...
[ "0.727779", "0.7196964", "0.65432745", "0.64201707", "0.6378715", "0.6374151", "0.6279858", "0.6175564", "0.6173083", "0.60840946", "0.60698014", "0.5997303", "0.5987318", "0.5981226", "0.59653664", "0.5951422", "0.58668286", "0.58246344", "0.58026916", "0.576884", "0.5767016...
0.5642072
24
Process the data in the templates and set attributes accordingly.
def _ProcessTemplate(self,topdir): self.dicomdir = "%s/anatomicals" % self.topdir self.rawdir = "%s/raw" % topdir self.rawdirs = {} tmplt = self._GetTemplate() if self.opts.outdir is not None: # Override template output directory. tmplt['top_outdir'] = self.opts.outdir self.tmplt = tmplt if len(tmplt['top_outdir']) == 0: tmplt['top_outdir'] = os.path.realpath(self.topdir) raise RuntimeError('Template file must specify an output directory.') tmplt['top_outdir'] = os.path.realpath(tmplt['top_outdir']) if '/home' in tmplt['top_outdir'][:7]: raise RuntimeError('Image data cannot be stored in the /home partition. Change the "top_outdir" entry in the template file: %s.' % (' '.join(self.templates))) # tmplt['subject'] = 'orig' self.procdir = os.path.abspath("%s/%s" % \ (tmplt['top_outdir'],tmplt['subject'])) target = os.path.abspath('%s/../..' % tmplt['top_outdir']) if not ismounted(target): raise RuntimeError('Could not access partition at %s' % target) self.anatdir = "%s/anat" % self.procdir self.fmapdir = "%s/%s" % (self.procdir,tmplt['fmap']['outdir']) self.dtidir = "%s/%s" % (self.procdir,tmplt['dti']['outdir']) self.logdir = "%s/%s" % (self.procdir,tmplt['logdir']) self.skip = tmplt.get('skip', DEFAULT_SKIP) self.acq_tr = tmplt.get('acq_tr',None) self.episetup_dir = "%s/%s" % (self.procdir,tmplt['first_epi']) self.fsl_cmpblty = tmplt.get('fsl_compatibility',False) self.epi_file_format = self.tmplt['epi_file_format'] self.censor_thresh = tmplt.get('censor_threshold', 2.) self.censor_interleave = tmplt.get('censor_interleave', True) # self.server_userid = self.tmplt.get('server_userid','default') # Overide flags for aligning EPIs and skull-stripping with command- # line options. 
if self.opts.align_fmaps: self.align_fmaps = True else: self.align_fmaps = self.tmplt.get('epi_align', False) if self.opts.no_align_fmaps: self.no_align_fmaps = True else: self.no_align_fmaps = self.tmplt.get('no_epi_align', False) if self.opts.skull_strip: self.skull_strip = True else: self.skull_strip = self.tmplt.get('skull_strip', False) # Create log file now so it can be used immediately. if not os.path.exists(self.logdir): if self.verbose: print 'mkdir %s' % self.logdir if not self.opts.fake_opts: self.MakeDir(self.logdir) self._ProcessTemplateEpiInfo()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mk_data(self):\n self.data = self.DEFAULTS.copy()\n\n for template in self.raw_data.get('extends', []):\n template_data = self.load_template(template)\n self.data.update(template_data)\n\n self.data.update(self.raw_data)\n\n str_replace(self.data)\n\n if...
[ "0.6795622", "0.62453735", "0.6222564", "0.6101384", "0.6039472", "0.5970306", "0.5819297", "0.57768494", "0.57472324", "0.5730784", "0.5694763", "0.5607646", "0.5585788", "0.5584792", "0.5528987", "0.5528372", "0.5523228", "0.54737645", "0.5458935", "0.5447084", "0.5441327",...
0.5548209
14
Synthesize yaml header filename from directory name.
def _yaml_filename(self, path): fullpath = os.path.abspath(path) if not os.path.isdir(fullpath): dirname = os.path.dirname(fullpath) else: dirname = path if dirname.endswith('/'): dirname = dirname[:-1] fname = dirname.split('/')[-1] + '.yaml' return dirname, fname
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_file_name(name: types.TSeedName) -> str:\n return f\"{name}.yml\"", "def format_filename(title: str, id: Any, ext: str = \".\", dirFormat=None):\r\n ...", "def file_title(self):\n basename = os.path.basename(self.__path)\n index_dot = basename.rfind(\".\")\n if index_dot...
[ "0.63299215", "0.6177597", "0.6154436", "0.6145138", "0.61193216", "0.6117771", "0.60900265", "0.6084294", "0.59812474", "0.59652424", "0.5949886", "0.59462756", "0.5918559", "0.59006333", "0.5891024", "0.58728975", "0.5864215", "0.5860745", "0.5852123", "0.58312166", "0.5803...
0.6754749
0
Get T1 and T2 weighted structural image info.
def _AnatInfo(self, info, path): if info['data_filetype'] == 'ge_data': return ERROR outdir = '%s/%s' % (self.procdir, self.tmplt['anat']['outdir']) info['InversionTime'] = self.hdr['native_header']['InversionTime'] if info['psdname'] == 'efgre3d' or info['psdname'] == 'bravo': # Structural scans are 3d inversion-recovery. if self.hdr['native_header']['InversionTime'] < 1.: # Only inversion recovery used for anatomy. Must be calibration. return None elif self.hdr['zsize'] > 1.25: # Only one slab acquired. Assume thick slices. name = 'T1Low_%d' % self.n_t1low self.n_t1low += 1 else: if self.n_t1high == 0: name = 'T1High' else: name = 'T1High_%d' % self.n_t1high self.n_t1high += 1 else: psdname = info['psdname'] name = self.imgtype.get(psdname, info['psdname']) if self.ntype.has_key(psdname): self.ntype[psdname] += 1 name = '%s_%0d' % (name, self.ntype[psdname]) else: self.ntype[psdname] = 1 info['norm_src'] = False info['outdir'] = outdir info['filetype'] = self.tmplt['anat']['format'] info['imgfile'] = '%s/%s' % (info['outdir'], name) self.entry_map['anat'].append(self.current_entry) return OK
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scalarInfo(img, cnt):\n\tm = cntInfo(img, cnt)\n\td = {\"perimeter\":m[\"perimeter\"], \"oreientation\":m[\"orientation\"], \"solidity\":m[\"solidity\"],\"height\":m[\"height\"], \"extent\":m[\"extent\"], \"aspect ratio\":m[\"aspect ratio\"], \"area\":m[\"area\"], \"sum intensity\":m[\"sum intensity\"], \"widt...
[ "0.56604415", "0.56434953", "0.5608303", "0.55392164", "0.55089027", "0.5473448", "0.5417464", "0.5324782", "0.5314561", "0.5309392", "0.5288557", "0.5276307", "0.524422", "0.52250624", "0.52200425", "0.52154565", "0.5210282", "0.5189751", "0.51866674", "0.515786", "0.5154455...
0.0
-1
Create list of epis in pfile format (epi_series) and of epis in dicom format (epirt_paths)
def _EpiInfo(self, info, path): epi_vals = {'tdim':self.hdr['tdim'], 'plane':self.hdr['plane'], \ 'SeriesNumber':self.hdr['subhdr']['SeriesNumber']} for key in self.epi_keys.keys(): if self.epi_keys[key] != str(epi_vals[key]): # Return None, which will cause these data to be ignored. return None # Early versions of the EPIC software saved p-files for the setup epis. # Don't process these (or any epi with fewer than eight useable frames). if self.hdr['tdim'] < (8 + self.skip): return None info['slice_order'] = self.shdr.get('SliceOrder', 'altplus') if self.shdr['EffEchoSpacing'] is not None: info['echo_spacing'] = self.shdr['EffEchoSpacing']/1000. else: info['echo_spacing'] = 0. if info['data_filetype'] == 'dicom': # Entry is name of dirctory for dicom images. if not os.path.isdir(path): entry = os.path.dirname(path) else: entry = path else: # Otherwise it is the name of a directory containing p-files. entry = path if info['data_filetype'] == 'ge_data' and info['type'] is not None: # Found a pfile. Add it to the list. if entry not in self.pfiles and info['tdim'] > 2: self.pfiles.append(entry) self.entry_map['epi'].append(entry) if info['series'] not in self.epi_series: self.epi_series.append(info['series']) elif info['data_filetype'] == 'dicom' and \ info['psdname'] == 'epibold': # This is the initial EPI done during setup. info['outdir'] = self.episetup_dir info['type'] = 'first_epi' self.entry_map['first_epi'].append(entry) info['imgfile'] = '%s/first_epi_%d' % \ (self.episetup_dir, len(self.entry_map['first_epi'])) elif ('epirt' in info['psdname'] or info['psdname'] == 'epi' or \ info['psdname'] == '*epfid2d1_64') and info['tdim'] > 2: # This is an epi reconstructed on the scanner. 
self.epi_series.append(info['series']) self.entry_map['epi'].append(entry) if not os.path.isdir(path): tmp_path = os.path.dirname(path) else: tmp_path = path self.epirt_paths.append(tmp_path) if self.fsl_flip: info['filetype'] = 'brik' else: info['filetype'] = self.tmplt['epi_file_format'] info['TR'] = self.hdr['tsize'] if self.tmplt['acq_tr'] is None: info['acq_tr'] = float(info['TR']) else: info['acq_tr'] = float(self.tmplt['acq_tr']) return OK
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AssignEpiNames(self):\n# Sort each run in the series by its acquisition time.\n epi_sort = self.epi_times.keys()\n epi_sort.sort()\n# Rewrite pfiles as an ordered list of p-files to be reconstructed.\n for idx in xrange(len(epi_sort)):\n entry = self.epi_times[epi_so...
[ "0.6314016", "0.59568083", "0.5881365", "0.5844795", "0.5802068", "0.5752258", "0.56392115", "0.54253495", "0.5387592", "0.5376296", "0.53625387", "0.5316459", "0.52880996", "0.5277681", "0.52515364", "0.52128714", "0.5204417", "0.5186836", "0.5180629", "0.5144857", "0.514438...
0.63749075
0
Read the header from the raw data specified by "path" and use this information combined with the template information to generate the "info" dict object. This object defines the options and paths for each operation.
def _GetImageInfo(self,path):
    """Read the header of *path* and build the per-entry info dict.

    Returns None for non-image files, {'type':'refdat'} for ref.dat
    files, a minimal dict for localizers, {'type':'break'} when the
    per-type handler requests an abort, and otherwise a fully
    populated info dict for downstream processing.
    """
    hd = Header(path, scan=True)
    hdr = hd.hdr
    self.hdr = hdr
    if hdr is None:
#       Either a ref.dat file or it isn't an imaging file.
        if 'ref' in path and 'dat' in path:
            self.refdats[os.path.realpath(path)] = True
            info = {'type':'refdat'}
            return info
        else:
            return None
    elif hdr['filetype'] == 'dicom' and not path.endswith('.yaml'):
#       Write a yaml file to the raw data directory if possible.
        dirname, outfile = self._yaml_filename(path)
        yaml_name = '%s/%s' % (dirname, outfile)
        if not os.path.exists(yaml_name):
#           Create yaml file using dirname,
#           e.g., ../anatomicals/S2_EFGRE3D/s2_efgre3d.yaml
            try:
                hd.write_hdr_to_yaml('%s/%s' % (dirname,outfile))
            except IOError:
#               This is a nonessential function, so ignore exceptions
#               such as access violations.
                pass
    elif hdr['filetype'] == 'dicom' or hdr['filetype'] == 'ge_ifile':
        # Normalize single-image paths to their containing directory.
        if not os.path.isdir(path):
            path = os.path.dirname(path)
    shdr = hdr['subhdr']
    nhdr = hdr['native_header']
    self.shdr = shdr
    # DTI can be flagged in either the subheader or the native header.
    if 'dti' in shdr.get('PulseSequenceName','').lower() \
                or 'dti' in nhdr.get('PulseSequenceFile',''):
        psdname = 'dti'
    else:
        psdname = os.path.basename((shdr.get('PulseSequenceName','').strip()).lower())
    # NOTE(review): 'plane' and 'acqtime' each appear twice in this
    # literal; the later values (hdr['plane'], shdr['SeriesTime']) win.
    info = {'psdname':psdname, \
            'acqtime':shdr['AcqTime'], \
            'series':int(shdr['SeriesNumber']), \
            'plane':hdr['plane'].strip(), \
            'type':self.imgtype.get(psdname,None), \
            'plane':hdr['plane'], \
            'acqtime':shdr['SeriesTime'], \
#           'fmapdir':None, \
            'refdat':None, \
            'imgfile':None, \
            'base':None, \
            'tdim':int(hdr['tdim']), \
            'echo_spacing':None, \
            'filetype':'brik', \
            'suffix':self.suffix.get(hdr['filetype'], 'brik'), \
            'data_filetype':hdr['filetype']}
    if info['type'] == 'localizer':
#       Don't process the localizer.
        return info
    if isinstance(info['acqtime'], int):
        info['acquisition_time'] = time.ctime(info['acqtime'])
    if nhdr.get('ImageFormat',('unknown'))[0] == 'DERIVED' and info['type'] == 'epi':
#       Sometimes screenshots are defined as epis.
        info['type'] = None

#   Call the method appropriate to the type of scan in this series.
    # NOTE(review): apply() is Python-2-only; under Python 3 this would be
    # method(info, path).
    stat = apply( self.GetInfoMethods.get(info['type'], self._NoInfo), \
                  [info, path])
    if stat:
        info = {'type':'break'}
        return info
    info['suffix'] = self.suffix.get(info['filetype'], 'brik')
    return info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _main_header(self, hdr):\n d = {}\n # Called readDefAnalysis in OpenMIMS\n d['sample type'], d['data included'], d['sample x'], d['sample y'], \\\n d['analysis type'], d['user name'], d['sample z'], date, time = \\\n unpack(self._bo + '4i 32s 16s i 12x 16s 16s', hdr.r...
[ "0.58803326", "0.57795227", "0.5498735", "0.5484519", "0.5375694", "0.5355326", "0.5330148", "0.5242671", "0.5242115", "0.52344966", "0.52192587", "0.52183825", "0.51776946", "0.51531774", "0.51515484", "0.5147676", "0.5141665", "0.50517124", "0.5044997", "0.5039923", "0.5027...
0.56649214
2
Pair up each epi with a fieldmap.
def _SetFmapInfo(self): for epi in self.pfiles + self.epirt_paths: self.info[epi]['fmapname'] = None self.info[epi]['fmap_entry'] = None for entry in self.entry_map['fmap']: fmap_name = self.info[entry]['imgfile'] + self.info[entry]['suffix'] if self.info[entry]['plane'] == self.info[epi]['plane']: # Use the fieldmap acquired at the same plane. self.info[epi]['fmapname'] = fmap_name self.info[epi]['fmap_entry'] = entry break else: # for fmap in self.fmaps.keys(): for entry in self.entry_map['fmap']: # No fmap at same orientation, look for fmaps in other planes. # There won't be more than one, so it isn't much of a choice. fmap_name = self.info[entry]['imgfile'] + \ self.info[entry]['suffix'] if self.info[entry]['plane'] == 'sagittal': self.info[epi]['fmapname'] = fmap_name self.info[epi]['fmap_entry'] = entry break elif self.info[entry]['plane'] == 'axial': self.info[epi]['fmapname'] = fmap_name self.info[epi]['fmap_entry'] = entry break elif self.info[entry]['plane'] == 'coronal': self.info[epi]['fmapname'] = fmap_name self.info[epi]['fmap_entry'] = entry break elif self.info[entry]['plane'] == 'oblique': self.info[epi]['fmapname'] = fmap_name self.info[epi]['fmap_entry'] = entry self.info[epi]['plane'] = 'oblique' break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MakeFieldmaps(self):\n if self.verbose:\n print 'Compute fieldmaps.'\n for entry in self.info:\n if self.info[entry]['type'] == 'fmap':\n if self.info[entry]['imgfile'] == None:\n# Fieldmap data not found.\n return\n# ...
[ "0.5794583", "0.56985533", "0.56411123", "0.5450113", "0.5425905", "0.5395057", "0.53146863", "0.5253061", "0.52433765", "0.5232632", "0.5187637", "0.51365435", "0.511374", "0.5107384", "0.51051044", "0.51040184", "0.50853544", "0.50818384", "0.50797653", "0.5053735", "0.5050...
0.6167625
0
Find the hires structural image that was acquired nearest to "acqtime"
def _FindNearestAnat(self, acqtime): tdiff_min = 1e6 for anat in self.entry_map['anat']: if self.info[anat]['type'] == 'T1High' and \ self.info[anat]['InversionTime'] > 0.: tdiff = abs(acqtime - self.info[anat]['acqtime']) if tdiff < tdiff_min: tdiff_min = tdiff anat_min = anat return anat_min
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def closest_in_time(images, target):\n\n tgt_mjd = fits.getheader(target, ext=1)['mjd-obs']\n mjds = np.array([fits.getheader(i, ext=1)['mjd-obs'] for i in images])\n\n return images[abs(mjds - tgt_mjd).argsort()[0]]", "def find(image):\n keypoint, description = describe(image)\n # load keypoints, des...
[ "0.61136955", "0.5495255", "0.53449804", "0.53088015", "0.5267729", "0.5264703", "0.52540445", "0.51978594", "0.51893973", "0.5173132", "0.5172842", "0.517216", "0.5133897", "0.51184076", "0.50679076", "0.50616145", "0.5060293", "0.5051561", "0.5042053", "0.5037389", "0.50302...
0.58393806
1
Create structures defining acquisition time for fieldmaps and anatomicals. First find the fieldmap (or hires structural if no fieldmap was collected) nearest (on average) to the epis. Then define this series as the one that should be in register with the epis.
def _SetAnatTgts(self):
    """Choose the anatomical each fieldmap/EPI should register to.

    If fieldmaps exist, each fieldmap is paired with the T1High
    acquired nearest in time.  Otherwise a single structural is chosen
    that is nearest (summed over all EPIs) to either the start or end
    of the EPI acquisitions.  Then per-EPI registration attributes
    (anat_tgt, anat_matfile, catmats, fmapname, anat_link) are set.
    """
    # NOTE(review): anat_candidates/fmap_candidates and tdiff_min are
    # written but never read below -- apparently vestigial.
    anat_candidates = {}
    fmap_candidates = {}
    for entry in self.entry_map['anat']:
        if self.info[entry]['type'] == 'T1High':
            anat_candidates[entry] = self.info[entry]['acqtime']

#   Find the valid anatomical acquired nearest to fieldmap.
    tdiff_min = 1e6
    if len(self.entry_map['fmap']) > 0:
        for entry in self.entry_map['fmap']:
            anat_tgt = self. _FindNearestAnat(self.info[entry]['acqtime'])
            self.info[entry]['anat_ref'] = anat_tgt
    else:
#       No fieldmaps were collected. Find the structural nearest the
#       beginning of the EPIs.
        if len(self.entry_map['anat']) == 1:
            anat_tgt = self.entry_map['anat'][0]
        else:
            epi_start = []
            tmin = 1e6
            for anat in self.entry_map['anat']:
                if self.info[anat]['type'] != 'T1High':
                    continue
                tsum1 = 0; tsum2 = 0;
                for epi in self.entry_map['epi']:
#                   Difference from start of structural and first epi
                    tsum1 += abs(self.info[anat]['acqtime'] - \
                                 self.info[epi]['acqtime'])
#                   Difference from start of structural and last epi
                    tsum2 += abs(self.info[anat]['acqtime'] - \
                                 (self.info[epi]['acqtime'] +\
                                  self.info[epi]['TR']*self.info[epi]['tdim']))
                if tsum1 < tmin or tsum2 < tmin:
                    tmin = min(tsum1, tsum2)
                    anat_tgt = anat

#   Resolve anatomical names and links.
    # NOTE(review): when fieldmaps exist, anat_tgt here is the reference
    # of the *last* fieldmap iterated above (loop-variable carry-over);
    # it is also reused for every EPI's anat_link below -- confirm intent.
    self._SetAnatNames(anat_tgt)

#   Set appropriate attributes in the entry for each EPI.
    for epi in self.entry_map['epi']:
        if len(self.entry_map['fmap']) > 0 and not self.no_fmapcorr:
            fmap_entry = self.info[epi]['fmap_entry']
            anat_ref = self.info[fmap_entry]['anat_ref']
            self.info[epi]['anat_tgt'] = fmap_entry
            self.info[epi]['anat_matfile'] = self.info[fmap_entry]['matfile']
            if self.align_fmaps or (not self.no_align_fmaps and \
                    self._SetCatMotionFmapMats(fmap_entry, anat_ref)):
#               Concatenate motion-correction matrices with tranform from
#               fieldmap to structural. Use the registered fieldmap.
                self.info[epi]['catmats'] = True
                fmap_info = self.info[self.info[epi]['fmap_entry']]
                self.info[epi]['fmapname'] = \
                    fmap_info['imgfile_r'] + fmap_info['suffix']
            else:
#               Assume fieldmap is in register with the structural.
                self.info[epi]['catmats'] = False
        else:
            self.info[epi]['anat_tgt'] = anat_tgt
            self.info[epi]['anat_matfile'] = None
            self.info[epi]['catmats'] = False
        self.info[epi]['anat_link'] = self.info[anat_tgt]['imgfile'] + \
                                      self.info[anat_tgt]['suffix']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetEpiAcqTimes(self, series):\n# Find minimum and maximum start times for each acquistion in series.\n self.epi_times = {}\n for entry in self.entry_map['epi']:\n# Loop through each file in this series.\n if self.info[entry]['series'] == series and \\\n ...
[ "0.62284875", "0.5733199", "0.54121506", "0.53654045", "0.5362718", "0.53480995", "0.53380924", "0.5298414", "0.5262806", "0.52612984", "0.52312875", "0.5173023", "0.5139651", "0.5129809", "0.5126058", "0.5113684", "0.51000977", "0.5095595", "0.5085868", "0.5080458", "0.50771...
0.59192055
1
Resolve anatomical names and links.
def _SetAnatNames(self, anat_tgt): # Define links to structural image in each output directory. for entry in self.entry_map['epi'] + self.entry_map['fmap'] + \ self.entry_map['dti'] + self.entry_map['asl']: self.info[entry]['anat_link'] = anat_tgt # Name the normalization source image T1High. Number the rest. anat_entries = self.entry_map['anat'][:] anat_entries.remove(anat_tgt) n_t1high = 1 for entry in anat_entries: if self.info[entry]['type'] == 'T1High': # High res T1-weighted, not normalization target. Rename it. fname = 'T1High_%d' % n_t1high fullname = '%s/%s' % (self.info[entry]['outdir'], fname) self.info[entry]['imgfile'] = fullname self.info[entry]['imgfile_skstrip'] = '%s_skstrip' % fullname self.info[entry]['matfile'] = '%s_matfile.aff12.1D' % fullname self.info[anat_tgt]['norm_src'] = False n_t1high += 1 fname = 'T1High' fullname = '%s/%s' % (self.info[anat_tgt]['outdir'], fname) self.info[anat_tgt]['imgfile'] = fullname self.info[anat_tgt]['imgfile_skstrip'] = '%s_skstrip' % fullname self.info[anat_tgt]['matfile'] = '%s_matfile.aff12.1D' % fullname self.info[anat_tgt]['norm_src'] = True self.anatomical = '%s%s' % (self.info[anat_tgt]['imgfile'], \ self.info[anat_tgt]['suffix']) # The target for motin correction is the source for spatial normalization. self.norm_src = anat_tgt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resolveNames(self):\n client.resolveNames(self)\n # TODO: Do any name resolutions here.\n # The names of other objects this object refers to, either intrinsically or in its parameters, should be checked here.", "def resolve_references(self):\n self.specs = self._resolve_partial(self.p...
[ "0.6231827", "0.6040455", "0.6001876", "0.5930197", "0.5827227", "0.5807689", "0.56926876", "0.562395", "0.5538968", "0.55118114", "0.54805756", "0.54794437", "0.54488856", "0.54369795", "0.5422763", "0.5406316", "0.53882045", "0.53638905", "0.53599083", "0.5340648", "0.53325...
0.0
-1
Determine whether to (1) motioncorrect to frame nearest T1High and assume that T1High and the fieldmap are in register or (2) catenate transformations to the base epi with a transformation from the base epi to T1High.
def _SetCatMotionFmapMats(self, fmap, anat): if abs(self.info[fmap]['series'] - self.info[anat]['series']) == 1: # Adjacent series, use them. return False elif abs(self.info[fmap]['acqtime'] - self.info[anat]['acqtime']) < 180: return False else: sernos = [] min_series = min(self.info[fmap]['series'], self.info[anat]['series']) max_series = max(self.info[fmap]['series'], self.info[anat]['series']) gap_series = range(min_series+1, max_series, 1) for entry in self.info.keys(): if self.info[entry]['type'] != 'null': sernos.append(self.info[entry]['series']) for series in gap_series: if series in sernos: # Fieldmap is separated from structural by one "full" series, # where a full series is any series that was worth processing # by this progroam, i.e, not a HOS, an asset cal scan, a # b1 cal scan or any other very sort calibration scan. return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CorrectMotion(self):\n if self.verbose:\n print \"Correct for motion\"\n for entry in self.entry_map['epi']:\n info = self.info[entry]\n\n if os.path.exists(info['imgfile_m'] + info['suffix']):\n return\n# Always use brik for 3dDeconvolve....
[ "0.53131735", "0.5240588", "0.5225582", "0.52202463", "0.5204122", "0.51401055", "0.51253384", "0.51180834", "0.51146686", "0.50602734", "0.5022961", "0.49833623", "0.49649775", "0.4957402", "0.4932199", "0.49016243", "0.48917276", "0.4890399", "0.48004982", "0.47914508", "0....
0.44811964
70
Define the series and frame of the target EPI for motion correction. This is done by first creating a dictionary, indexed by the time delay between each run's start or end and its registration target, and then choosing the entry with the smallest delay.
def _SetBaseEpi(self): tinfo = {} for entry in self.entry_map['epi']: info = self.info[entry] if self.info[entry]['fmap_entry'] is None: tgt = info['anat_tgt'] else: tgt = info['fmap_entry'] tgt_time = self.info[tgt]['acqtime'] plane = info['plane'] if not tinfo.has_key(plane): tinfo[plane] = {} tdiff = abs(info['acqtime'] - tgt_time) tinfo[plane][tdiff] = (entry, 'start') tdiff = abs(info['acqtime'] + info['TR']*info['tdim']/1000 - tgt_time) tinfo[plane][tdiff] = (entry, 'end') bases = {} for plane in tinfo.keys(): tdiffs = tinfo[plane].keys() tdiffs.sort() bases[plane] = tinfo[plane][tdiffs[0]] for epi in self.entry_map['epi']: plane = self.info[epi]['plane'] base_entry, base = bases[plane] self.info[epi]['base_entry'] = base_entry self.info[epi]['base'] = base self.info[epi]['basefile'] = '%s'%(self.info[base_entry]['imgfile'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _animation_step(self, par_dict):\n\n t0 = time.time()\n dt = par_dict[\"dt\"]\n controller = par_dict[\"controller\"]\n integrator = par_dict[\"integrator\"]\n if controller is not None:\n _, _, tau = controller.get_control_output(\n meas_pos=self.x[...
[ "0.55629534", "0.52799535", "0.5275234", "0.52682", "0.5261582", "0.52376616", "0.5227446", "0.52257586", "0.5189562", "0.51574963", "0.5152989", "0.5131383", "0.51249796", "0.5111892", "0.5099293", "0.5094038", "0.50885504", "0.5081776", "0.5076241", "0.50693774", "0.5060204...
0.0
-1
Strip off leading directory names to make a pretty path for display.
def GetBase(self, fname, suffix): wds = fname.split('/') suff = suffix.replace('.BRIK','') suff = suff.replace('.HEAD','') if len(wds) > 1: return '.../%s' % '/'.join(wds[-2:]) + suff else: return fname + suff
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prettify_path(path, leading=None):\r\n leading = (leading or os.getcwd()).replace(os.altsep, os.sep)\r\n s = os.path.splitext(path.replace(os.altsep, os.sep))[0]\r\n if s.startswith(leading):\r\n s = s.replace(leading, '')\r\n return s.strip(os.sep)", "def clean_directory_path(path):\n ...
[ "0.7440191", "0.67219174", "0.67075866", "0.6677584", "0.6655997", "0.6626382", "0.65079343", "0.6468608", "0.6372779", "0.6348614", "0.63264537", "0.632086", "0.6295324", "0.62762046", "0.6208288", "0.6200279", "0.619991", "0.61735916", "0.61682737", "0.6166878", "0.61517286...
0.0
-1
Create a text string summarizing how the motion correction was done.
def SummarizeMotionTargets(self): text = '\nSummary of motion-correction: \n' for epi in self.entry_map['epi']: info = self.info[epi] text += self.GetBase(epi, '') base = self.GetBase(info['base_entry'], '') text += ' ->3dvolreg-> %s[%s]' % (base, info['base']) if info['fmap_entry'] is not None: fmap = info['fmap_entry'] text += ' ->assume-registered-> %s' % self.GetBase(fmap, '') anat = self.info[fmap]['anat_ref'] if info['catmats']: text += ' ->3dAllineate-> %s' % \ self.GetBase(anat, '') else: text += ' ->assume-registered-> %s' % self.GetBase(anat, '') else: anat = info['anat_tgt'] text += ' ->assume-registered-> %s' % self.GetBase(anat, '') text += '\nEPIs should be in register with %s\n' % \ self.GetBase(self.anatomical, '') return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prescription(self):\n prescription = \"\\n{0:>10}\\t{1:>10}\\t{2:>10}\\t{3:>10}\\n\".format(\"R\",\"Material\",\"d\",\"diameter\")\n for surface in self.lensSurfaces():\n prescription += \"{0:>10.2f}\\t{1:>10}\\t{2:>10.2f}\\t{3:>10.2f}\\n\".format(surface.R, str(surface.mat), surface.s...
[ "0.6384936", "0.634679", "0.63305056", "0.63101876", "0.62891483", "0.62351215", "0.62246543", "0.6216952", "0.61375284", "0.6118015", "0.6031679", "0.597089", "0.59577096", "0.59560966", "0.5955112", "0.59385556", "0.5877444", "0.58648694", "0.58440596", "0.58404136", "0.583...
0.69408065
0
Find the correct ref.dat file for each pfile.
def _GetRefdat(self):
    """Associate each p-file with the most appropriate ref.dat file.

    Preference order: a ref.dat whose name encodes the p-file's plane
    (sag/cor/axial), then the only ref.dat if exactly one exists.
    NOTE(review): the time-stamp path in this method is broken (see
    inline notes) and appears never to have run.
    """
    for rfile in self.refdats.keys():
#       Get times for ref.dat files with a time-stamp.
        words = rfile.replace('.','_').split('_')
        # NOTE(review): a stamp like 11:28:32 contains 2 colons, so
        # "== 20" can never be true and this branch is dead code; if it
        # ever ran, 'rtime' is never initialized and would NameError.
        if len(words) == 6 and words[-2].count(':') == 20:
#           This file was time-stamped by the sequence. Get the
#           date and time. file name format:
#           ref_Sep_9_2007_11:28:32.dat
            rtime[rfile] = hms_to_secs(words[-2])
    for pfile in self.pfiles:
        min_difftime = 1.e20
        self.info[pfile]['refdat'] = None
        for rfile in self.refdats.keys():
            if rfile[:3] == 'ref' and 'dat' in rfile:
#               This is a reference data file. First see if the orientation is
#               appended. If the file has neither a time-stamp nor a plane and
#               there is more than one ref.dat, the epi reconstruction will
#               be aborted.
                rinfo = {}
                ref_file = None
                if 'sag' in rfile and self.info[pfile]['plane'] == 'sagittal':
#                   self.info[pfile]['refdat'] = rfile
                    ref_file = rfile
                    break
                elif 'cor' in rfile and self.info[pfile]['plane'] == 'coronal':
#                   self.info[pfile]['refdat'] = rfile
                    ref_file = rfile
                    break
                elif 'axial' in rfile and self.info[pfile]['plane'] == 'axial':
#                   self.info[pfile]['refdat'] = rfile
                    ref_file = rfile
                    break
                elif len(self.refdats.keys()) == 1:
#                   Use the only one if that is all there is.
                    ref_file = rfile
                    epi_time = hms_to_secs(self.info[pfile]['acqtime'].split()[-2])
                    # NOTE(review): 'rftime' is an undefined name
                    # (presumably a typo for 'rtime'); combined with the
                    # dead time-stamp branch above, this comparison would
                    # raise if it were ever reached.
                    if epi_time - rtime[rfile] < min_difftime and \
                       rftime[rfile] > epi_time:
#                       Use the reference file that acquired nearest to the EPI
#                       but before it.
                        min_difftime = epi_time - rtime[rfile]
#                       self.info[pfile]['refdat'] = rfile
                        ref_file = rfile
        # NOTE(review): reconstructed indentation -- the plane-matched
        # branches above break to here; ref_file is unbound if
        # self.refdats is empty or no name starts with 'ref'.
        if ref_file:
#           Found a candidate.
            if not self.info[pfile]['refdat']:
#               Haven't found one yet, use it.
                self.info[pfile]['refdat'] = ref_file
            else:
#               Found two. Choose one in the same directory.
                oldpath = os.path.dirname(self.info[pfile]['refdat'])
                newpath = os.path.dirname(ref_file)
                pfile_path = os.path.dirname(pfile)
                if oldpath == newpath:
#                   Same path, use the old one.
                    self.info[pfile]['refdat'] = ref_file
                elif newpath == pfile_path:
                    self.info[pfile]['refdat'] = ref_file
#               else Do nothing, use existing choice.
        elif not os.path.exists(rfile):
            self.info[pfile]['refdat'] = None
        elif os.stat(rfile).st_size > 0:
#           This path is taken if no info is encoded in the file name.
#           Don't use empty ref.dat files.
            self.info[pfile]['refdat'] = rfile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_reference_data():\n return {f:read_local_file(f) for f in os.listdir(DATA_DIR)}", "def get_input_file(self, *args, refsep='$', docopy=True):\n # filename = self.get_data(*args, docopy=docopy)\n filename = args[1]\n ref_files = ref_from_image(filename, ['IDCTAB', 'OFFTAB', 'NPOLFI...
[ "0.5827411", "0.55954766", "0.5543815", "0.5515996", "0.5477217", "0.5447455", "0.5424407", "0.54009223", "0.5372652", "0.5370227", "0.5364059", "0.5320857", "0.5294063", "0.52870816", "0.5261717", "0.5260605", "0.5242285", "0.52146846", "0.51936823", "0.51909316", "0.5184307...
0.6289985
0
Order the epis and assign names defined in the template files.
def _GetEpiOrder(self): self.epi_series.sort() for series in self.epi_series: self.GetEpiAcqTimes(series) self.AssignEpiNames()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AssignEpiNames(self):\n# Sort each run in the series by its acquisition time.\n epi_sort = self.epi_times.keys()\n epi_sort.sort()\n# Rewrite pfiles as an ordered list of p-files to be reconstructed.\n for idx in xrange(len(epi_sort)):\n entry = self.epi_times[epi_so...
[ "0.6324153", "0.5177771", "0.5155023", "0.5115554", "0.5099046", "0.50792223", "0.50491947", "0.5045727", "0.49775788", "0.49729112", "0.4915597", "0.49100342", "0.48777694", "0.48749626", "0.48587674", "0.48527986", "0.48417845", "0.48114392", "0.47574326", "0.47461927", "0....
0.50911015
5
Fill structure for sorting acquisition times.
def GetEpiAcqTimes(self, series): # Find minimum and maximum start times for each acquistion in series. self.epi_times = {} for entry in self.entry_map['epi']: # Loop through each file in this series. if self.info[entry]['series'] == series and \ self.info[entry]['tdim'] > 2: # Relate each entry to its time of acquisition. self.epi_times[self.info[entry]['acqtime']] = entry
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sort_time(self):\n time = np.copy(self.data[\"time\"][:])\n ind_sorted = np.argsort(time)\n ind_valid: list[int] = []\n for ind in ind_sorted:\n if time[ind] not in time[ind_valid]:\n ind_valid.append(ind)\n n_time = len(time)\n for key, arra...
[ "0.6283316", "0.6228001", "0.6228001", "0.620398", "0.57670397", "0.57652324", "0.57647645", "0.5753199", "0.57444894", "0.5722699", "0.57000864", "0.5646933", "0.55846196", "0.55585563", "0.5551203", "0.5549234", "0.55354136", "0.5506358", "0.54945", "0.54808986", "0.5460458...
0.5688327
11
Assign names to each epi file based on information in the template.
def AssignEpiNames(self):
    """Assign output names, directories and options to each EPI run.

    Runs are processed in acquisition-time order (self.epi_times built
    by GetEpiAcqTimes).  Names come from the per-plane entries of
    self.epinames, falling back to the 'any' plane when a plane runs
    out of names.  Raises RuntimeError when the template does not
    define enough EPI names.
    """
#   Sort each run in the series by its acquisition time.
    # NOTE(review): keys()/sort(), xrange and has_key below are
    # Python-2-only constructs.
    epi_sort = self.epi_times.keys()
    epi_sort.sort()
#   Rewrite pfiles as an ordered list of p-files to be reconstructed.
    for idx in xrange(len(epi_sort)):
        entry = self.epi_times[epi_sort[idx]]
        info = self.info[entry]
        if info['data_filetype'] == 'ge_data':
            self.pfiles_recon.append(entry)
        info['run'] = '%0d' % (self.n_epi)
        self.n_epi = self.n_epi + 1
        plane = info['plane']
        if not self.epinames.has_key(plane):
            plane = 'any'
        n_epi = self.epinames[plane]['n_epi']
        # Out of names for this plane: fall back to 'any', else abort.
        if n_epi > len(self.epinames[plane]['names'])-1:
            if self.epinames.has_key('any') and \
                        n_epi < len(self.epinames['any']):
                plane = 'any'
                n_epi = self.epinames[plane]['n_epi']
            else:
                self.DumpInfo()
                errstr = 'Not enough EPI names in template file'
                raise RuntimeError(errstr)
#       epiname = self.epinames[plane]['names'][n_epi]
        filebase = os.path.basename(self.epinames[plane]['names'][n_epi])
        epi_mf_outdir = os.path.dirname(\
                        self.epinames[plane]['names'][n_epi])
        epi_base = self.epinames[plane]['subdir'][n_epi]
        tmp_outdir = '%s/%s' % (self.tmpdir, epi_base)
#       Get output directory for raw epis.
        if self.no_motcorr:
            epi_r_outdir = epi_mf_outdir
        elif self.keep_epi_raw:
            epi_r_outdir = self.epi_scratch_space
        else:
            epi_r_outdir = tmp_outdir
#       Get output directory for motion-corrected epis.
        if self.keep_epi_mot:
            epi_m_outdir = self.epi_scratch_space
        else:
            epi_m_outdir = tmp_outdir
        info['outdir'] = epi_mf_outdir
        if n_epi < len(self.epinames[plane]['names']):
            # NOTE(review): 'epiname' is assigned but never used.
            epiname = self.epinames[plane]['names'][n_epi]
            info['imgfile'] = '%s/%s' % (epi_r_outdir, filebase)
        else:
            info['imgfile'] = '%s/s%0d_epi_run%0d' % \
                              (epi_r_outdir, n_epi, idx+1)
        self.epinames[plane]['n_epi'] += 1

        info['mot_file'] = '%s/%s_mtn.txt' % (epi_mf_outdir, filebase)
        info['censor_prefix'] = '%s/%s' % (epi_mf_outdir, filebase)
        info['imgfile_t'] = '%s/%s_t' % (epi_m_outdir, filebase)
        if self.no_motcorr:
            info['imgfile_m'] = None
            info['imgfile_mf'] = None
            info['imgfile_final'] = info['imgfile']
        else:
            # NOTE(review): this first imgfile_m assignment is always
            # overwritten by one of the branches below.
            info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)
            if self.no_fmapcorr or info['fmap_entry'] is None:
                info['imgfile_m'] = '%s/%s_m' % (epi_mf_outdir, filebase)
                info['imgfile_mf'] = None
                info['imgfile_final'] = info['imgfile_m']
            else:
                info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)
                info['imgfile_mf'] = '%s/%s_mf' % (epi_mf_outdir, filebase)
                info['imgfile_final'] = info['imgfile_mf']
        info['skip'] = self.skip
        info['motion_ref_frame'] = self.tmplt['motion_ref_frame']

        info['motion_interp'] = self.tmplt['epi_motion_interp']
        # 3dvolreg expects the interpolation flag with a leading dash.
        if not info['motion_interp'].startswith('-'):
            info['motion_interp'] = '-%s' % info['motion_interp']

        info['filetype'] = self.tmplt['epi_file_format']
        info['valid'] = True
        self.info[entry] = info

        if not self.no_motcorr:
            epi_base = os.path.basename(info['imgfile_m'])
            info['matfile_m'] = '%s/%s.aff12.1D' % (info['outdir'], epi_base)
            info['matfile_mcat'] = '%s/%scat.aff12.1D' % (info['outdir'], epi_base)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SA...
[ "0.5765641", "0.5753263", "0.5717375", "0.5547444", "0.55394816", "0.54884017", "0.54623926", "0.53843206", "0.5353627", "0.531927", "0.52712166", "0.5261715", "0.523186", "0.52143687", "0.51853824", "0.51815045", "0.51764005", "0.5170443", "0.5149673", "0.51465887", "0.51442...
0.72918445
0
Dump the info object to a yaml file.
def DumpInfo(self): if self.logdir is None: return self.dumpfile = '%s/preprocess_info.yaml' % (self.logdir) try: f = open(self.dumpfile,'w') f.write(yaml.dump(self.info,default_flow_style=False, indent=4)) f.close() except IOError: self.errors = True errstr = 'Error accessing %s' % self.dumpfile raise IOError(errstr) self.LogErrors(errstr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, file=sys.stdout):\n d = self.to_dict()\n if d:\n yaml.dump([d], file, default_flow_style=False)", "def __exit__(self, *_):\n with self._info_yaml_file_path.open(\"w\") as info:\n self._yml.dump(self._info, info)", "def write(self):\n self.f.writ...
[ "0.7140269", "0.6745914", "0.67231625", "0.6591301", "0.6522555", "0.6498975", "0.64982736", "0.6444522", "0.63701254", "0.62643826", "0.62501603", "0.61706054", "0.60966444", "0.60667735", "0.6060812", "0.6049239", "0.6019757", "0.60015565", "0.60012335", "0.59733576", "0.59...
0.7883224
0
Load the info dictionary from a yaml file.
def UnDumpInfo(self): filename = '%s/preprocess_info.yaml' % self.logdir f = open(filename,'r') self.info = yaml.load(f.read()) f.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_datas(self) -> tp.Dict[str, dict]:\n with open(self._file, \"r\") as stream:\n try:\n load: tp.Dict[str, dict] = yaml.safe_load(stream)\n logger.info(\"YAML imported\")\n return load\n except yaml.YAMLError as exc:\n ...
[ "0.73581326", "0.70676595", "0.70332026", "0.6870221", "0.68692476", "0.6855705", "0.68462366", "0.680534", "0.6785473", "0.6768446", "0.6765261", "0.6741415", "0.6676443", "0.6671067", "0.66030633", "0.6585844", "0.6558528", "0.65580547", "0.6556143", "0.65504235", "0.654742...
0.0
-1
Ensure all epi files are recomputed by verifying that all output prefixes either don't exist or are deleted.
def CleanEpi(self): for entry in self.info.keys(): info = self.info[entry] if info['psdname'] == 'epi': for tag in ('imgfile', 'imgfile_m', 'imgfile_mf', 'imgfile_t'): if info.has_key(tag) and info[tag] is not None and \ os.path.exists(info[tag]): print 'Deleting %s*' % (info[tag], info['suffix']) cmd = '/bin/rm %s%s*' % (info[tag], info['suffix']) self.ExecCmd(cmd) if '.BRIK' in info['suffix']: cmd = '/bin/rm %s%s*' % (info[tag], \ info['suffix'].replace('.BRIK','.HEAD')) self.ExecCmd(cmd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleanupAthenaMP(workdir, outputfiles=[]):\n\n for ampdir in glob('%s/athenaMP-workers-*' % (workdir)):\n for (p, d, f) in os.walk(ampdir):\n for filename in f:\n if 'core' in filename or 'tmp.' in filename:\n path = os.path.join(p, filename)\n ...
[ "0.5984496", "0.5982936", "0.59657407", "0.59632736", "0.5948575", "0.5879957", "0.5869022", "0.57011503", "0.5682905", "0.56816006", "0.56665385", "0.5654999", "0.5644077", "0.5643207", "0.5622836", "0.5610936", "0.56048816", "0.56018406", "0.5580712", "0.55641234", "0.55630...
0.5448935
33
Create output directories if they don't already exist.
def CreateDirs(self): # First, create a list of directories. dnames = [] tags = ['', '_m', '_mf'] for entry in self.info.keys(): if self.info[entry]['type'] == 'epi': for tag in tags: fname = self.info[entry].get('imgfile%s' % tag, None) if fname is not None: dnames.append(os.path.dirname(fname)) else: if self.info[entry].get('outdir',None) is not None: dnames.append(self.info[entry]['outdir']) # Create them if they don't already exist. for dname in dnames: if not os.path.exists(dname): self.MakeDir(dname) if self.verbose: print 'mkdir %s' % dname
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_output_dir(self):\n out_dir = os.path.dirname(self._out_format)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n LOG.info('Created output directory: %s', out_dir)", "def setup_outdir():\n try:\n shutil.rmtree(OUTDIR)\n except FileNotFoundError:\n ...
[ "0.8051411", "0.7945233", "0.7910225", "0.78630805", "0.78603286", "0.78426844", "0.78248453", "0.77685827", "0.77487713", "0.759815", "0.7533603", "0.75111306", "0.74982744", "0.745719", "0.7449486", "0.7433374", "0.7404734", "0.7333325", "0.72971153", "0.7263825", "0.723374...
0.6967413
37
Execute a bash command. This method is obsolete now. At one time it called a library function that worked around a deadlock bug in popen2
def ExecCmd(self, cmd, halt_on_error=True): self.f_bash.write("%s\n"%cmd) self.f_bash.flush() if not self.dry_run: try: execCmd(cmd, self.f_log, self.f_crash, self.verbose) self.f_log.flush() except RuntimeError, errstr: if halt_on_error: raise RuntimeError(errstr) else: self.LogErrors('%s' % errstr) return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bash_command(cmd):\n subprocess.Popen(['/bin/bash', '-c', cmd])", "def bash(cmd):\n subprocess.run(cmd, shell=True, executable='/bin/bash') # ,", "def bash_command(cmd):\n cmd = str(cmd)\n chain = cmd.split(\"|\")\n n_pipes = len(chain)\n\n for i in range(n_pipes):\n if i == 0:\n ...
[ "0.75032", "0.7133294", "0.70825005", "0.7043764", "0.70271105", "0.6944504", "0.6944504", "0.6944504", "0.6891018", "0.6809242", "0.67647636", "0.66959184", "0.66665363", "0.66484356", "0.6620994", "0.65703243", "0.65675676", "0.65449697", "0.6537749", "0.65313303", "0.65089...
0.0
-1
Convert anatomical images from dicom or ifiles to briks or niftis.
def ConvertAnat(self): if self.verbose: print 'Convert T1 and T2 images...' for entry in self.info: info = self.info[entry] if self.info[entry]['imgfile'] is None: continue if self.info[entry]['type'] in self.anat_types: key = self.info[entry]['type'] imgfile = self.info[entry]['imgfile'] cmd = 'convert_file %s %s %s %s' % (self.flip_opts, entry, \ imgfile, self.info[entry]['filetype']) checkfile = '%s%s' % (imgfile, self.info[entry]['suffix']) self.CheckExec(cmd, [checkfile]) if self.info[entry]['norm_src'] and self.skull_strip: cmd = "3dSkullStrip -input %s -prefix %s" % \ (checkfile, self.info[entry]['imgfile_skstrip']) checkfile = '%s+orig.BRIK' % \ (self.info[entry]['imgfile_skstrip']) self.CheckExec(cmd, [checkfile])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(input_folder, output_images_folder, output_files_folder, bb_file,\n archive_folder, name_mapping):\n\n output_images_folder = Path(output_images_folder)\n output_files_folder = Path(output_files_folder)\n archive_folder = Path(archive_folder)\n output_images_folder.mkdir(exist_ok=True)...
[ "0.5878483", "0.5656656", "0.5646914", "0.5597715", "0.5594963", "0.55353117", "0.5508064", "0.54495406", "0.54446286", "0.5424856", "0.5355759", "0.5332224", "0.5326884", "0.53148586", "0.52936196", "0.5291717", "0.52617556", "0.5258072", "0.5257662", "0.525628", "0.52407", ...
0.5975682
0
Register the magnitude image from the fieldmap data to the hires structural. Save the matrices for later use in motion correction.
def AlignFieldmaps(self): for entry in self.entry_map['fmap']: info = self.info[entry] # Register the magnitude image at the shortest TR to the T1-IR # structural image. target = self.info[self.norm_src]['imgfile'] + \ self.info[self.norm_src]['suffix'] source = info['magfile'] + info['suffix'] matfile = info['matfile'] fmt = '3dAllineate -prefix NULL -1Dmatrix_save %s -base %s ' + \ '-source %s -cost mi -warp shift_rotate' cmd = fmt % (info['matfile'], target, source) self.CheckExec(cmd, [info['matfile']]) # Convert to unitary matrix (remove scaling component.) cmd = 'cat_matvec -ONELINE %s -P > %s' % \ (info['matfile'], info['matfile_unitary']) self.CheckExec(cmd, [info['matfile_unitary']]) # Rotate the magnitude image to the new grid. fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s' cmd = fmt % (info['magfile_r']+info['suffix'], \ info['matfile_unitary'], info['magfile'] + info['suffix']) self.CheckExec(cmd, [info['magfile_r']+info['suffix']]) # Rotate the fieldmap to the new grid. fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s' cmd = fmt % (info['imgfile_r']+info['suffix'], \ info['matfile_unitary'], info['imgfile'] + info['suffix']) self.CheckExec(cmd, [info['imgfile_r']+info['suffix']])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recon(self, spirec):\n tmpdir = tempfile.mkdtemp()\n basename = 'recon'\n basepath = os.path.join(tmpdir, basename)\n pfilename = os.path.abspath(self.pfilename)\n\n # run spirec to get the mag file and the fieldmap file\n cmd = spirec + ' -l --rotate -90 --magfile --s...
[ "0.61536443", "0.5817665", "0.5757287", "0.5740727", "0.5657208", "0.56416994", "0.5542085", "0.54642224", "0.54503155", "0.54379785", "0.5347235", "0.52391076", "0.5095052", "0.5094995", "0.5089604", "0.50434417", "0.5038968", "0.5036496", "0.5012753", "0.497948", "0.4972110...
0.49835664
19
Convert anatomical images to briks.
def ProcessDTI(self): for entry in self.info: if self.info[entry]['type'] == 'dti': if self.verbose: print 'Processing DTI data in %s' % os.path.basename(entry) # dtiname = '%s/s%s_dti' % \ # (self.info[entry]['outdir'],self.info[entry]['series']) cmd = 'convert_file %s %s %s' % (entry, \ self.info[entry]['imgfile'], self.info[entry]['filetype']) fname = '%s%s' % \ (self.info[entry]['imgfile'], self.info[entry]['suffix']) self.CheckExec(cmd, [fname])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bayer_images(self) -> typing.List[np.ndarray]:\n return [rbg_to_bayer_bg(c.get_image()) for c in self.cameras]", "def ConvertAnat(self):\n if self.verbose:\n print 'Convert T1 and T2 images...'\n for entry in self.info:\n info = self.info[entry]\n if ...
[ "0.60772157", "0.58175284", "0.5723036", "0.5568555", "0.5511439", "0.5510956", "0.5482435", "0.5474333", "0.5450582", "0.53886837", "0.53840804", "0.5379313", "0.5322583", "0.53046983", "0.52473575", "0.5239021", "0.52281713", "0.5225414", "0.5208012", "0.5204904", "0.515314...
0.0
-1
Convert ASL images to nifti.
def ProcessAsl(self): for entry in self.info: if self.info[entry]['type'] == 'asl': if self.verbose: print 'Processing ASL data in %s' % os.path.basename(entry) cmd = 'convert_file %s %s %s' % (entry, \ self.info[entry]['imgfile'], self.info[entry]['filetype']) fname = '%s%s' % \ (self.info[entry]['imgfile'], self.info[entry]['suffix']) self.CheckExec(cmd, [fname])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_to_nifti(log, brain):\n log.info('Doing convert_to_nifti')\n cmdargs = split('3dAFNItoNIFTI {}'.format(brain))\n proc = Popen(cmdargs, stdout=PIPE, stderr=STDOUT)\n log.info(proc.stdout.read())", "def ConvertAnat(self):\n if self.verbose:\n print 'Convert T1 and T2 image...
[ "0.6358437", "0.6124715", "0.5904855", "0.57210785", "0.57022226", "0.5685979", "0.5665494", "0.5628516", "0.5568676", "0.5515679", "0.5492054", "0.5473638", "0.5446226", "0.5407872", "0.54046696", "0.5352408", "0.53269494", "0.5309064", "0.53084403", "0.5288916", "0.52865076...
0.55716157
8
Create the fieldmap(s) and the corresponding magnitude images.
def MakeFieldmaps(self): if self.verbose: print 'Compute fieldmaps.' for entry in self.info: if self.info[entry]['type'] == 'fmap': if self.info[entry]['imgfile'] == None: # Fieldmap data not found. return # Make a magnitude image for use in checking registration. cmd = 'convert_file -f0 -m0 %s %s nii' % \ (entry, self.info[entry]['magfile']) self.CheckExec(cmd, [self.info[entry]['magfile'] + '.nii']) # Make fieldmap. Use separate loop in case make_fmap aborts. for entry in self.info: if self.info[entry]['type'] == 'fmap': fmapname = self.info[entry]['imgfile'] if not os.path.exists('%s.nii' % fmapname) or self.redo: # Couldn't find or existing fmap, compute a new one. if self.verbose: extra_args = '-v' else: extra_args = '' if self.info[entry]['correct_fmap_phase'] == 'force': extra_args += ' --force-slicecorr' elif self.info[entry]['correct_fmap_phase'] == 'omit': extra_args += ' --omit-slicecorr' cmd = 'make_fmap %s %s %s' % (extra_args, entry, fmapname) # error = self.ExecCmd(cmd, halt_on_error=False) if self.no_fmapcorr: halt_on_error = False else: halt_on_error = True error = self.CheckExec(cmd, ['%s.nii' % fmapname], \ halt_on_error=halt_on_error) if error: self.info[entry]['valid'] = False del self.fmaps[entry]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AlignFieldmaps(self):\n for entry in self.entry_map['fmap']:\n info = self.info[entry]\n\n# Register the magnitude image at the shortest TR to the T1-IR\n# structural image.\n target = self.info[self.norm_src]['imgfile'] + \\\n ...
[ "0.677067", "0.64014757", "0.62887424", "0.6082136", "0.58927894", "0.58386827", "0.58132875", "0.57924163", "0.57924163", "0.57452226", "0.5743414", "0.57319504", "0.57206607", "0.57153034", "0.563697", "0.5628693", "0.56157184", "0.5559617", "0.55475587", "0.5537074", "0.55...
0.7716374
0
Create link to structural image if it doesn't already exist.
def LinkAnat(self): if self.anatomical is None: return for entry in self.info.keys(): info = self.info[entry] if info.has_key('anat_link'): self.LinkFiles(info['outdir'], self.anatomical)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_HTML_a_img(link_url, image_url):\n img = '<img src=\"' + image_url + '\">'\n linked_image = create_HTML_a(link_url, img)\n return linked_image", "def make_image(self, path):\n\t\treturn None", "def image(self, link, title, alt):\n if not link.startswith(('http://', 'https://')):\n ...
[ "0.6233887", "0.60792506", "0.60782003", "0.5984427", "0.58839905", "0.58724886", "0.5832461", "0.58206445", "0.580554", "0.5745005", "0.5738044", "0.5731455", "0.5714038", "0.5617503", "0.560202", "0.5583627", "0.55811983", "0.5542545", "0.55367535", "0.5529259", "0.5487097"...
0.0
-1
Create links to BRIK, HEAD, and .nii files.
def LinkFiles(self, srcdir, target): if '+orig' in target: tgt_prefix = target.replace('.BRIK','') tgt_prefix = tgt_prefix.replace('.HEAD','') linkfiles = ['%s.HEAD'%tgt_prefix, '%s.BRIK' %tgt_prefix] else: linkfiles = [target] for linkfile in linkfiles: linkname = '%s/%s' % (srcdir, os.path.basename(linkfile)) rel_linkdir = abspath_to_relpath(os.path.dirname(target), srcdir) rel_linkfile = '%s/%s' % (rel_linkdir, os.path.basename(linkfile)) if not os.path.exists(linkname) and not os.path.islink(linkname): cmd = 'cd %s && ln -s %s %s' % (srcdir, rel_linkfile, linkname) self.ExecCmd(cmd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_links(self):\n for line in self.iter_files_to_install():\n arcname, link = line.split()\n if link == 'False':\n continue\n self.files.append(create_link(arcname, link, self.prefix))", "def makeLinks(self):\n self.deleteIndexFileIfExists()\...
[ "0.667778", "0.59682673", "0.5959984", "0.567664", "0.5635121", "0.55980814", "0.553239", "0.5522011", "0.54892784", "0.54057527", "0.5380412", "0.5370104", "0.53354234", "0.5332412", "0.5303601", "0.5300853", "0.52821183", "0.5264791", "0.5261789", "0.5261702", "0.5261481", ...
0.6162017
1
Extract the initial EPIs stored in dicom format.
def ExtractFirstEpi(self): for entry in self.info: if self.info[entry]['type'] == 'first_epi': epiname = self.info[entry]['imgfile'] cmd = 'convert_file %s -f0 %s %s %s' % \ (self.flip_opts, entry,epiname, self.info[entry]['filetype']) fname = '%s%s' % (epiname, self.info[entry]['suffix']) self.CheckExec(cmd, [fname]) self.info[entry]['imgfile'] = fname
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dicom_load():\n # Identify folders with EPI data\n dirs = [i for i in os.listdir(dcm_dir) if os.path.isdir(os.path.join(dcm_dir, i))]\n d_cnt = 0\n for d in dirs:\n dcm_file = os.path.join(dcm_dir,d,os.listdir(os.path.join(dcm_dir,d))[0])\n try:\n dcm_data = pydicom.dcmread...
[ "0.6132044", "0.59762484", "0.5642774", "0.53444785", "0.52427983", "0.52217543", "0.5216209", "0.51387924", "0.5125599", "0.50237185", "0.5018066", "0.5001039", "0.49862388", "0.49604324", "0.4950634", "0.49402615", "0.49259216", "0.49019468", "0.48623866", "0.4825966", "0.4...
0.6886132
0
Reconstruct the EPIs from pfiles.
def ReconEpis(self): run = zeros(100) if self.verbose: print 'Reconstruct EPIs' for pfile in self.pfiles_recon: if self.info[pfile]['refdat'] is None: # Find the ref.dat file later. continue if self.info[pfile]['compression'] is not None: # Data are compressed, copy to tmp. compression = self.info[pfile]['compression'] pfile_decomp = '%s/%s' % (self.tmpdir, \ os.path.basename(self.info[pfile]['pfile_decomp'])) if os.path.exists(pfile_decomp): errstr = 'Attempting to overwrite existing p-file (%s)' % pfile_decomp + \ ' in ReconEpis' cmd = '%s %s > %s' % \ (decompress_cmds[compression], pfile, pfile_decomp) self.ExecCmd(cmd) else: # Create a link on /tmp to the pfile so the link to ref.dat will also # be on /tmp, (which is always writeable.) pfile_decomp = '%s/%s' % (self.tmpdir, os.path.basename(pfile)) if not os.path.exists(pfile_decomp): os.symlink(pfile, pfile_decomp) refname, refcmpress = self.CheckCompression( \ self.info[pfile]['refdat']) if refcmpress is not None: refdat_decomp = '%s/%s' % (self.tmpdir, os.path.basename(refname)) cmd = '%s %s > %s' % \ (decompress_cmds[refcmpress], \ self.info[pfile]['refdat'], refdat_decomp) self.ExecCmd(cmd) else: refdat_decomp = self.info[pfile]['refdat'] if refdat_decomp is not None: if refdat_decomp != 'ref.dat': # Create link bearing the file name epirecon_ex expects. refdat_link = '%s/ref.dat' % self.tmpdir if not os.path.exists(refdat_link): if self.verbose: print 'ln -s %s %s' % (refdat_decomp, refdat_link) if os.path.islink(refdat_link): # ref.dat is a broken symbolic link. 
if self.verbose: print 'rm %s' % ref_file os.remove(refdat_link) try: os.symlink(refdat_decomp, refdat_link) except OSError: self.errors = True pfile_link = '%s/%s' % (self.tmpdir, os.path.basename(pfile_decomp)) os.symlink(pfile_decomp, pfile_link) os.symlink(refdat_decomp, '%s/ref.dat' % self.tmpdir) series = int(self.info[pfile]['series']) run[series] = run[series] + 1 epiname = self.info[pfile]['imgfile'] cmd = 'epirecon_ex -F -f %s -NAME %s -fmt brik -skip %d' % \ (pfile_decomp, epiname, self.skip) fname = '%s+orig.BRIK' % epiname self.CheckExec(cmd, [fname]) # self.epi_prefixes[pfile] = self.info[pfile]['imgfile'] else: errstr = '*******************************************\n' + \ 'No ref.dat file exists for %s\n' % pfile + \ '*******************************************\n' self.error_log = self.error_log + errstr self.f_crash.write(errstr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AssignEpiNames(self):\n# Sort each run in the series by its acquisition time.\n epi_sort = self.epi_times.keys()\n epi_sort.sort()\n# Rewrite pfiles as an ordered list of p-files to be reconstructed.\n for idx in xrange(len(epi_sort)):\n entry = self.epi_times[epi_so...
[ "0.64974564", "0.61578393", "0.57928175", "0.57109356", "0.5541757", "0.5491878", "0.5438996", "0.54297066", "0.5325947", "0.5219759", "0.52026135", "0.5192286", "0.5152777", "0.51420754", "0.5114544", "0.5093096", "0.50334144", "0.5020539", "0.5005299", "0.49603912", "0.4959...
0.64618903
1
Eliminate entries in epi recon table that have already been reconstructed. I don't remember why this is here but I know that at one time it was important.
def PruneEpiEntries(self): pruned = {} basefiles = [] baseentries = {} for entry in self.entry_map['epi']: if baseentries.has_key(self.info[entry]['basefile']): baseentries[self.info[entry]['basefile']].append(entry) else: baseentries[self.info[entry]['basefile']] = [entry] for entry in self.entry_map['epi']: targets = [] if self.no_motcorr: target = self.info[entry]['imgfile'] elif self.info[entry]['fmapname'] is None or self.no_fmapcorr: target = self.info[entry]['imgfile_m'] else: target = self.info[entry]['imgfile_mf'] targets.append(target + self.info[entry]['suffix']) targets.append('%s%s' % (self.info[entry]['censor_prefix'], '_censor.1D')) pruned[entry] = [True, baseentries[self.info[entry]['basefile']]] for target in targets: pruned[entry] = \ [False, baseentries[self.info[entry]['basefile']]] for key in pruned.keys(): if not pruned[key][0]: for entry in pruned[key][1]: pruned[entry][0] = False tmp = new_map = [] for entry in self.entry_map['epi']: if pruned[entry][0]: if self.verbose: print 'Skipping %s: Already reconstructed.' % targets[0] if entry in self.pfiles_recon: self.pfiles_recon.remove(entry) else: new_map.append(entry) self.entry_map['epi'] = new_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def elimination_ofconc(a2_data):\n for data in a2_data.values():\n data.pop('conc')\n return a2_data", "def nonphysicalxs_remotion(a2_data,res_nufi_removal):\n for i in a2_data['I'].keys():\n if i=='MACR' and res_nufi_removal==True:\n if 'nufi' in a2_data['I'][i]['R'].keys():\n ...
[ "0.6324132", "0.59182566", "0.5790216", "0.57542723", "0.57150894", "0.5654134", "0.55529314", "0.5501398", "0.54495615", "0.5417837", "0.5416808", "0.5410812", "0.53797144", "0.53654283", "0.5352083", "0.5328368", "0.53084546", "0.530087", "0.5300535", "0.5298115", "0.529656...
0.6166888
1
Convert epis reconstructed on the scanner.
def ConvertRtEpis(self): if self.verbose: print 'Convert EPIs to brik' for entry in self.entry_map['epi']: if ('epirt' in self.info[entry]['psdname'] or \ self.info[entry]['psdname'] == 'epi' or \ self.info[entry]['psdname'] == '*epfid2d1_64') and \ self.info[entry]['data_filetype'] == 'dicom': series = self.info[entry]['series'] if self.info[entry]['skip'] > 0: skip = '--skip=%s' % self.info[entry]['skip'] else: skip = '' cmd = 'convert_file %s %s %s brik' % \ (skip, entry, self.info[entry]['imgfile']) checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile']) self.CheckExec(cmd, [checkname])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ReconEpis(self):\n run = zeros(100)\n if self.verbose:\n print 'Reconstruct EPIs'\n for pfile in self.pfiles_recon:\n if self.info[pfile]['refdat'] is None:\n# Find the ref.dat file later.\n continue\n if self.info[pfile]['compr...
[ "0.5776446", "0.5423936", "0.5320594", "0.5170984", "0.51104087", "0.5029466", "0.5025242", "0.50205046", "0.48759243", "0.4801463", "0.47459334", "0.47000486", "0.46995685", "0.4694864", "0.46835348", "0.467961", "0.4648427", "0.46379155", "0.46379155", "0.4625023", "0.45730...
0.6517192
0
Correct for motion and call SliceTimeCorrect.
def CorrectMotion(self): if self.verbose: print "Correct for motion" for entry in self.entry_map['epi']: info = self.info[entry] if os.path.exists(info['imgfile_m'] + info['suffix']): return # Always use brik for 3dDeconvolve. suffix = '+orig' epifile = '%s%s' % (info['imgfile'], suffix) prefix = info['imgfile_m'] base_entry = info['base_entry'] if info['base'] == 'start': # Use the first frame specified in template file. Defaults # to zero. base = info['motion_ref_frame'] else: # Use the last frame. base = self.info[base_entry]['tdim'] - info['skip']-1 base = ('%d' % base).replace(' ','') # Correct for slice-timing. self.SliceTimeCorrect(info, epifile) plane = info['plane'] anat_tgt = info['anat_tgt'] # anat_entry = self.anat_entry[plane] if info['catmats']: # Include additonal transformation in motion correction such # that final image is in register with the fieldmap, which has # been registered to the structural image that will be used for # spatial normalization. self.MotcorCatenate(info, base, anat_tgt) else: # Assume fieldmap is in register with the structural. self.Motcor(info, base) if info.get('fmapname', None) is None: # No fieldmap correction. if self.fsl_flip: # Flip the way fslview likes it. self.FSLFlip(info['imgfile_m'], info['imgfile_final']) elif info['suffix'] == '.nii': # Copy motion-corrected images from /tmp to output directory outfile = info['imgfile_final'] + info['suffix'] cmd = '3dcopy %s+orig %s' % (info['imgfile_m'], outfile) self.CheckExec(cmd, [outfile], force=True) cmd = '/bin/rm %s+orig*' % info['imgfile_m'] self.CheckExec(cmd, [], force=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_drift_correction(self, pos):\n\n\t\tprint(\"function not supported yet\")", "def refframe_correct(self, ra, dec, obstime, sobjs=None):\n # Correct Telescope's motion\n refframe = self.par['calibrations']['wavelengths']['refframe']\n if refframe in ['heliocentric', 'barycentric'] ...
[ "0.60318804", "0.54270923", "0.53947157", "0.53792375", "0.5303855", "0.5262295", "0.5260591", "0.52238935", "0.5190531", "0.5141736", "0.5131086", "0.50906426", "0.5085911", "0.5074954", "0.50716", "0.506681", "0.50448614", "0.50436366", "0.50436366", "0.50377357", "0.503135...
0.6062027
0
Compute motioncorrection transformation matrices, catenate with transform from fieldmap to structural, then inteprolate the data to the final grid.
def MotcorCatenate(self, info, base, anat_tgt): # First compute the transformation matrices due to epi-to-epi motion. fmt = '3dvolreg -prefix NULL -1Dmatrix_save %s -twopass ' + \ '-verbose -base %s+orig[%s] -dfile %s %s+orig' cmd = fmt % (info['matfile_m'], info['basefile'], base, \ info['mot_file'], info['imgfile_t']) self.CheckExec(cmd, [info['matfile_m']]) # Catenate with transformation from epi base image to the anatomical. cmd = 'cat_matvec -ONELINE %s -P %s -P > %s' % \ (self.info[anat_tgt]['matfile'], info['matfile_m'], \ info['matfile_mcat']) self.CheckExec(cmd, [info['matfile_mcat']]) # Interpolate the data to the new grid. fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s ' + \ '-warp shift_rotate -base %s+orig[%s] %s+orig' cmd = fmt % (info['imgfile_m'], info['matfile_mcat'], info['basefile'], \ base, info['imgfile_t']) self.CheckExec(cmd, ['%s+orig.BRIK'%info['imgfile_m'], \ '%s+orig.HEAD'%info['imgfile_m']])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CorrectMotion(self):\n if self.verbose:\n print \"Correct for motion\"\n for entry in self.entry_map['epi']:\n info = self.info[entry]\n\n if os.path.exists(info['imgfile_m'] + info['suffix']):\n return\n# Always use brik for 3dDeconvolve....
[ "0.62273926", "0.6207363", "0.6104549", "0.5850489", "0.58403313", "0.58327806", "0.5827466", "0.5725734", "0.5681582", "0.56439", "0.5603615", "0.5602365", "0.5596903", "0.5577693", "0.555407", "0.5526825", "0.5518143", "0.5506637", "0.5487023", "0.5475069", "0.54521763", ...
0.5767431
7
Motion correct using 3dvolreg. No slicetime correction.
def Motcor(self, info, base): fmt = '3dvolreg -prefix %s -twopass %s -verbose -base %s+orig[%s] ' + \ '-dfile %s %s+orig' cmd = fmt % (info['imgfile_m'], info['motion_interp'], \ info['basefile'], base, info['mot_file'], info['imgfile_t']) self.CheckExec(cmd, ['%s+orig.BRIK' % info['imgfile_m'], \ '%s+orig.HEAD' % info['imgfile_m']])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CorrectMotion(self):\n if self.verbose:\n print \"Correct for motion\"\n for entry in self.entry_map['epi']:\n info = self.info[entry]\n\n if os.path.exists(info['imgfile_m'] + info['suffix']):\n return\n# Always use brik for 3dDeconvolve....
[ "0.65022635", "0.5807314", "0.5593249", "0.55692303", "0.5540268", "0.5456648", "0.5415684", "0.539106", "0.5349843", "0.5269265", "0.52284133", "0.52205676", "0.5212929", "0.5210599", "0.52055985", "0.52035886", "0.5195111", "0.51901066", "0.5163019", "0.5134201", "0.5127333...
0.47712728
92
Call the jump_censor program to characterize the degree of motion.
def JumpCensor(self): if self.verbose: print 'Computing censor files.' for entry in self.entry_map['epi']: if self.censor_interleave: input_file = '%s+orig' % self.info[entry]['imgfile'] interleave = '--interleave' else: interleave = '' if os.path.exists(self.info[entry]['mot_file']): input_file = self.info[entry]['mot_file'] else: input_file = '%s+orig' % self.info[entry]['imgfile'] cmd = \ "jump_censor -v --prefix=%s %s --store-plot --threshold=%f %s" % \ (self.info[entry]['censor_prefix'], interleave, self.censor_thresh, input_file) try: self.CheckExec(cmd, ['%s_censor.1D' % self.info[entry]['censor_prefix']], force=False) except: print 'Error computing censor files.'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def censoring_fcn(self, q):\n return 1.0", "def jump(self):\n\t\tself.vel = -10\n\t\tself.tick_count = 0\n\t\tself.height = self.y", "def on_jump_press(self) -> None:\r\n if not self.node:\r\n return\r\n if don.jumpFly:\r\n self.node.handlemessage(\"impulse\",self.nod...
[ "0.59280413", "0.541607", "0.5290688", "0.52872306", "0.5283519", "0.51918846", "0.5162083", "0.5111304", "0.50946474", "0.50678855", "0.5063725", "0.50507736", "0.50380087", "0.5027665", "0.50178564", "0.49831063", "0.4976855", "0.4957412", "0.49509645", "0.49407175", "0.491...
0.7094361
0
Check if output file exists, then execute commmand. If there is more than one output file, the command will be executed if at least one is missing.
def CheckExec(self, cmd, checknames, force=False, halt_on_error=True): gone = False names = [] for name in checknames: if '+orig' in name: if name.endswith('+orig'): names.append('%s.HEAD' % name) names.append('%s.BRIK' % name) elif name.endswith('HEAD'): names.append(name) newname = name[:-4] + 'BRIK' if newname not in checknames: names.append(newname) elif name.endswith('BRIK'): newname = name[:-4] + 'HEAD' if newname not in checknames: names.append(newname) names.append(name) else: names.append(name) for name in names: if not os.path.exists(name) and not os.path.exists('%s.gz'%name): gone = True elif self.redo or force or gone: os.remove(name) gone = True if self.redo or gone: self.ExecCmd(cmd, halt_on_error=halt_on_error) if '+orig.' in names[0]: name = names[0].replace('.BRIK','') name = name.replace('.HEAD','') append_history_note(name, cmd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_for_preexisting_output_file(output_file_path):\n if path.exists(f\"{output_file_path}\"):\n print(\"Output file at specified save location file path already exists!\")\n print(\"Aborting operation!\")\n sys.exit()", "def exec_command_string_one_file(command_str,output):\n pri...
[ "0.6835208", "0.6785431", "0.667483", "0.6603357", "0.6349663", "0.606884", "0.60539275", "0.5998481", "0.59256774", "0.58965987", "0.5828717", "0.57747966", "0.5732897", "0.56906545", "0.56636125", "0.5657635", "0.56451756", "0.5630182", "0.56291837", "0.5592528", "0.5535338...
0.0
-1
Compute the temporal SNR for each epi, save in a nifti file, and store a summmary in a png file.
def ComputeSNR(self): for epi in self.entry_map['epi']: epifile = self.info[epi]['imgfile_final'] + self.info[epi]['suffix'] prefix = self.info[epi]['imgfile_final'] + '_snr' if not os.path.exists('%s_snr.png' % prefix): if self.verbose: print 'TemporalSnr(epifile=%s, prefix=%s)' % \ (epifile, prefix) try: TemporalSnr(epifile=epifile, prefix=prefix)() except: print("Error computing temporal SNR")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_SNR(snid: int, photo_data: pd.DataFrame, \n head_data: pd.DataFrame, code_zenodo: int, \n snana_file_index: int, code_snana: int):\n \n types_names = {90: 'Ia', 62: 'Ibc', 42: 'II', 67: '91bg', 52: 'Iax',\n 64:'KN', 95: 'SLSN', 994: 'PISN', 99...
[ "0.5856596", "0.58353704", "0.58029324", "0.5760767", "0.57194346", "0.5685501", "0.565635", "0.5610267", "0.5596072", "0.5580801", "0.5550373", "0.55146116", "0.55122143", "0.54595554", "0.53850317", "0.53831476", "0.53568536", "0.5345135", "0.53432506", "0.5322765", "0.5319...
0.78557074
0
Flip axes to orientation fslview expects.
def FSLFlip(self, infile, prefix): cmd = '3dresample -orient LPI -prefix %s.nii -inset %s+orig' % \ (prefix, infile) self.CheckExec(cmd, ['%s.nii' % prefix]) fname = '%s+orig.BRIK' % infile if os.path.exists(fname): os.remove(fname) fname = '%s+orig.HEAD' % infile if os.path.exists(fname): os.remove(fname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flip(self, axes=None):\n axes = self._make_axes_as_num(axes)\n vals = self.values\n if 0 in axes:\n vals = vals[::-1, :]\n if 1 in axes:\n vals = vals[:, ::-1]\n return Signal2D(vals, index=self.index, columns=self.columns)", "def flip_axes(input_file,...
[ "0.7156261", "0.7035609", "0.7033058", "0.6533171", "0.64487714", "0.63454676", "0.6332849", "0.6305558", "0.63048905", "0.62795544", "0.61534804", "0.6124441", "0.6114931", "0.60523576", "0.6050565", "0.5979607", "0.5953482", "0.5949251", "0.5904345", "0.5891284", "0.58893",...
0.0
-1
Change ownership to group read read/write.
def Chown(self): cmd = 'chmod -R 0775 %s' % self.procdir self.ExecCmd(cmd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def changeOwnership(self, document):\n document.changeOwnership(getSecurityManager().getUser(), False)", "def chown(self, user, group, rec=0):\n uid = getuserid(user)\n gid = getgroupid(group)\n if rec:\n for x in self.visit(rec=lambda x: x.check(link=0)):\n ...
[ "0.66892415", "0.62186366", "0.6200737", "0.6195593", "0.6075884", "0.6041262", "0.599601", "0.59780955", "0.5939623", "0.5920451", "0.58722764", "0.5808822", "0.5740759", "0.565018", "0.56067586", "0.55544484", "0.5519573", "0.5503808", "0.5484997", "0.5481513", "0.54399353"...
0.56320053
14
Store some useful information in the log file.
def LogProcess(self): time = datetime.today().strftime('%a %Y%b%d %X') # Get user name. f = os.popen("whoami","r") user = f.read().strip() f.close() entry = '%s\t%s\t%s\t%s\n' % (time, self.topdir, user, self.version) if ismounted(c.exams_file): # Append info to the exams file. try: f = open(c.exams_file,'a+') f.seek(0, 2) f.write(entry) f.close() except: # Not a huge problem if this doesn't work. pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_data(self):\n\n self.check_dir()\n with open(self.log_file, \"a\") as logger_file:\n logger_file.write(\"{}, {}\\n\".format(self.time, self.msg))", "def log(self):\n f = open(self.log_dir + 'parsed.log', 'a')\n try:\n # Write: local time | CurrentCost \"t...
[ "0.7579466", "0.7374402", "0.7347465", "0.70612556", "0.69241893", "0.69229144", "0.69018483", "0.6860829", "0.68271667", "0.67728823", "0.6762465", "0.6749048", "0.6685187", "0.66748893", "0.6671111", "0.66622746", "0.6618556", "0.6599946", "0.6599946", "0.6597023", "0.65964...
0.64186484
36
Delete temporary files, close log files and email results.
def CleanUp(self): if (not self.keep_epi_raw or not self.keep_epi_mot) \ and not self.opts.debug_tmp: self.tmp.Clean() overall_msg = self.SummaryErrorMessage() if self.tmplt and not self.no_email: EmailResults(self.tmplt['email'], overall_msg, \ self.topdir, self.dumpfile, self.logfile, self.motcor_summary) # Write the error message to the log file. if self.f_log is None: # Log file not opened yet, do it now. if self.logdir is not None: logfile = '%s/preprocess.log' % self.logdir f_log = open(logfile,'w') f_log.write('\n%s\n' % overall_msg) f_log.close() else: self.f_log.write('\n%s\n' % overall_msg) sys.exit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def teardown():\n for filename in files_to_delete:\n delete_file(filename)", "def cleanup(self):\n if self.log_fo:\n self.log_fo.close()", "def classCleanup(cls):\n cls.RemoveTempFile(\"child_send1.txt\")\n cls.RemoveTempFile(\"child_read1.txt\")\n cls.R...
[ "0.68277276", "0.67437613", "0.660802", "0.6526464", "0.6455161", "0.6444915", "0.64396834", "0.64194924", "0.6357929", "0.635103", "0.6348235", "0.6337223", "0.631956", "0.62995", "0.6263754", "0.6261455", "0.6259881", "0.6235108", "0.6226701", "0.62038773", "0.6193479", "...
0.58914703
56
Create summary message for email.
def SummaryErrorMessage(self, error_log=None): if error_log is None: error_log = self.error_log # server = socket.gethostname().split('.')[0] mssg = '\nPreprocessing script complete for data in %s\n\nServer: %s\n'\ % (self.topdir, self.server) # Log time. ms = time.time() ms = int(1000*(ms - int(ms))) mssg += '\nTime: %s:%03d\n' % \ (datetime.today().strftime('%a %b %d, %Y; %X'), ms) if len(error_log) > 0: mssg += 'Command: %s\n\nSummary:\n' % (' '.join(sys.argv)) lines = error_log.split('\n') for line in lines: if line.startswith('Description:'): mssg += line[12:] mssg += '\n\nDetails:' + error_log else: mssg += '\nNo problems detected (this does NOT imply that everything was computed.).\n\n' return mssg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def composeSummaryEmail(self):\r\n message = \"\"\"From: Douglas Gregor <dgregor@osl.iu.edu>\r\nTo: boost@lists.boost.org\r\nReply-To: boost@lists.boost.org\r\nSubject: [Report] \"\"\"\r\n message += str(self.numFailures()) + \" failures on \" + branch\r\n if branch != 'trunk':\r\n ...
[ "0.63322425", "0.61821836", "0.61753136", "0.6175085", "0.61733943", "0.6131707", "0.61256206", "0.60573125", "0.6047461", "0.6034194", "0.60252714", "0.6003194", "0.5889488", "0.5864201", "0.584809", "0.58364767", "0.58144146", "0.5763619", "0.57625306", "0.5760647", "0.5760...
0.5509327
39
Bear in mind some simulations cannot contain neither peds nor objs
def __init__(self, no_peds=0, peds_topics=[], num_s_samples=1, objs=None): self.global_ts = -1 self.PEDESTRIAN_TOPICS = peds_topics self.drone = DroneModel(num_s_samples) self.subject = SubjectModel(num_s_samples) if no_peds > 0: self.peds = { str(i): PedestrianModel(num_s_samples) for i in range(no_peds) } else: self.peds = None self.objs = objs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_simulations(self):\n pass", "def simulation():\n\n return {\n \"type\": \"class\",\n \"base\": \"iso.process_step\",\n \"is_abstract\": False,\n \"is_document\": True,\n \"pstr\": (\"({}/{}/{})\", (\"used\", \"ran_for_experiments\", \"ensemble_id\")),\n ...
[ "0.62294304", "0.5881555", "0.5799523", "0.5603874", "0.5549049", "0.55471927", "0.5523414", "0.55085677", "0.5462701", "0.54552025", "0.54473364", "0.5418987", "0.5409605", "0.54048645", "0.5397913", "0.5395056", "0.53824604", "0.53729665", "0.53604305", "0.535905", "0.53540...
0.53972495
15
Check if the given data is not filled already in the bag
def is_coord_empty(self, data): check = False if data["topic"] in DRONE_POS_TOPICS: check = self.drone.check_if_pos(data["coord"]) elif data["topic"] in DRONE_VEL_TOPICS: check = self.drone.check_if_vel(data["coord"]) elif data["topic"] in DRONE_ACC_TOPICS: check = self.drone.check_if_acc(data["coord"]) elif data["topic"] in SUBJECT_TOPICS: check = self.subject.check_if_pos(data["coord"]) elif data["topic"] in self.PEDESTRIAN_TOPICS: check = self.peds[data["pid"]].check_if_pos(data["coord"]) return check
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_data(self):\n return ([0] != self.__contexts) and ([0] != self.__weights)", "def empty(self):\n return 0 >= len(self.__data)", "def verify_if_basket_is_empty(self):\n self._basket.verify_if_basket_is_empty()", "def not_empty(entry):\n gt_boxes = entry['boxes']\n ret...
[ "0.65717185", "0.63385725", "0.6331197", "0.62527806", "0.61652166", "0.61505044", "0.608145", "0.608145", "0.60609126", "0.60432065", "0.60016364", "0.5989013", "0.59870905", "0.5972411", "0.59252816", "0.59170127", "0.59170127", "0.59170127", "0.59170127", "0.59170127", "0....
0.0
-1
Stores the given data
def add(self, data): if data["topic"] in DRONE_POS_TOPICS: self.drone.set_pos_val(data["ts"], data["coord"], data["value"]) elif data["topic"] in DRONE_VEL_TOPICS: self.drone.set_vel_val(data["ts"], data["coord"], data["value"]) elif data["topic"] in DRONE_ACC_TOPICS: self.drone.set_acc_val(data["ts"], data["coord"], data["value"]) elif data["topic"] in SUBJECT_TOPICS: self.subject.set_val(data["ts"], data["coord"], data["value"]) elif data["topic"] in self.PEDESTRIAN_TOPICS: self.peds[data["pid"]].set_val(data["ts"], data["coord"], data["value"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store_data(self, data):\n self.data.append(data)", "def store_data(self, store_data):\n self._store_data = store_data", "def saveData(self):\n pass", "def save_data(self):\n pass", "def save(self, data):\n\t\tif self.value:\n\t\t\tdata['value'] = self.value", "def store_da...
[ "0.82004386", "0.788964", "0.7607003", "0.75672203", "0.753746", "0.7510255", "0.74823844", "0.74805313", "0.7291415", "0.7250676", "0.71713775", "0.7075155", "0.7072606", "0.7048692", "0.69521874", "0.69172645", "0.6839893", "0.68263394", "0.68209374", "0.6810317", "0.678245...
0.0
-1
Check if all models stored are complete for the given timestamp.
def is_full(self): core_full = self.drone.complete() and self.subject.complete() if self.peds is None: return core_full else: return core_full and all([p.complete() for p in self.peds.values()])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def complete(self, verbose=False):\r\n if (self.__num_tasks == 0 or\r\n self.__arrival_time == 0 or\r\n self.__num_tasks != len(self.__tasks)):\r\n #\r\n if verbose:\r\n print (\"Request %s incomplete. %d expected tasks, %d recorded tasks, \"\r\n ...
[ "0.617297", "0.6029285", "0.59201694", "0.5905041", "0.5833025", "0.5806174", "0.58009607", "0.5789535", "0.5736804", "0.56675947", "0.566142", "0.56097996", "0.55602163", "0.55594164", "0.555323", "0.55530936", "0.55497646", "0.55493504", "0.5537272", "0.5535819", "0.5528456...
0.57210994
9
Empties the models within the bag
def empty_bag(self): if self.peds is not None: for _, model in self.peds.items(): model.reset() self.drone.reset() self.subject.reset()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear():\n\t\tModel.counter = 0", "def ClearModels(self):\n self._modelFileNames = []\n self._models = []\n self.Modified(readAgain=True)", "def clearmodels(self):\n \n dbpath, config = self._start() \n ModelDescriptionTable(dbpath).empty()\n...
[ "0.77507436", "0.76478666", "0.7637113", "0.74050826", "0.7388874", "0.73118776", "0.7252325", "0.709604", "0.7079502", "0.6967123", "0.69053054", "0.6871042", "0.68186563", "0.6815605", "0.68100023", "0.68016225", "0.6794553", "0.6774137", "0.67709094", "0.67597353", "0.6744...
0.8674077
0
Flushes the stored data and empties the bag. Use the drone ts
def get_data(self): data = { "ts": self.drone.pos[0][0], "drone": self.drone, "subject": self.subject, "peds": self.peds, # can be None "objs": self.objs # can be None } self.empty_bag() return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_data():\n redis_db.flushdb()", "def _flush(self):\n self._d = {}", "def flush(self):\n self.cur_stocks = self.cur_stocks.drop(self.cur_stocks.index)\n # add history that flushed whole stocks", "def reset(self):\n self.temp_data.clear()", "def empty_bag(self):\n ...
[ "0.7044794", "0.6967814", "0.68903756", "0.6873136", "0.6852722", "0.68504673", "0.68156374", "0.67802024", "0.6756149", "0.6756149", "0.67459303", "0.67278725", "0.6683704", "0.6683704", "0.6683704", "0.66800135", "0.6664512", "0.66624105", "0.66586465", "0.66405725", "0.659...
0.0
-1
Store data samples sent in multiple batches
def store_in_bag(self, data): # timestamp is (s, nanos): data["ts"], data["tnanos"] self.bag.add(data) # Ensure that all data have the same timestamp and are not None # Also there can't be more than a sample per second. if self.bag.is_full(): if random() > 0.99999: print("Telemetry data: ", data["topic"]) print("Bag data: ", self.bag.print_data()) # Then flush the data to process it and empty the bag data = self.bag.get_data() self.on_full(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self, num_batches: int):", "def process(self, data_batch: Any, data_samples: Sequence[dict]) -> None:", "def set_batch_data():\r\n if not os.path.exists(filepath):\r\n download_data()\r\n for n in range(0,6):\r\n d = read(filepath + flist[n])\r\n metadata = read(filepath + ...
[ "0.67769766", "0.6642692", "0.66230625", "0.6600419", "0.6592506", "0.64439774", "0.64208335", "0.63826704", "0.62283033", "0.619053", "0.61626637", "0.6160936", "0.61458206", "0.6139684", "0.61335284", "0.61245483", "0.61134374", "0.6109564", "0.60928875", "0.60751015", "0.6...
0.58928144
53
Do computations when multiple samples with equal timestamp are received
def on_full(self, bag_data): # bag_data is a dict {ts, drone, subject, peds, objs} # Bear in mind some simulations cannot contain neither peds nor objs dr_pos, dr_vel, dr_acc = bag_data["drone"].get_data() subj_pos = bag_data["subject"].get_pos() peds_poses = [] if bag_data["peds"] is not None: peds_poses = [p.get_pos() for p in bag_data["peds"].values()] objs_poses = [] if bag_data["objs"] is not None: objs_poses = [m.pose for m in bag_data["objs"]] # Bear in mind some simulations cannot contain neither peds nor objs # (ped and obj force would be 0.0) forces = [0,0,0] if not self.file.closed: rowdata = [ bag_data["ts"], dr_pos, dr_vel, dr_acc, subj_pos, forces[0], forces[1], forces[2] ] + peds_poses + objs_poses # print("\n\nROWDATA: ", rowdata, "\n\n") self.csv_writer.writerow(rowdata)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def audioEpochFeats(cur,uid,timestamp):\n\tuidA = uid +'audio'\n\n\tvar_stats = []\n\tstd_stats = []\n\tnoise = []\n\tvoiceToSilenceRatio = []\n\n\tfor i in range(1,24):\n\t\ths_timestamp = timestamp-86400+(i-1)*hour\n\t\the_timestamp = timestamp-86400+i*hour\n\t\t# Determining if start/end time of given hour is i...
[ "0.60448503", "0.5995645", "0.58302253", "0.57981944", "0.5796712", "0.57483286", "0.57325566", "0.57073534", "0.56920207", "0.5675369", "0.56446433", "0.5632045", "0.5621465", "0.5616494", "0.559965", "0.5543054", "0.55349135", "0.55306476", "0.5527352", "0.55071265", "0.547...
0.0
-1
Get the list of pedestrian topics
def get_ped_topics(n_peds): ls = [] for n in range(n_peds): ls += [coord.format(n) for coord in PED_TEMPL_TOPICS] return ls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_topics(self):\r\n return [x[0] for x in get_published_topics()]", "def topics(ctx):\n pass", "def topics(self):\r\n return topics.Topics(self)", "def topics(self, project: str) -> list:\n assert self.exists(project), f'Project {project} inesistente'\n\n cursor = self.pr...
[ "0.7867339", "0.73566186", "0.733255", "0.73222786", "0.72298735", "0.71772873", "0.7127868", "0.7058027", "0.7025698", "0.6943055", "0.68688285", "0.6814466", "0.67832524", "0.6769526", "0.6753593", "0.6670809", "0.660663", "0.6604779", "0.6540701", "0.65293837", "0.650491",...
0.68354625
11
Stop timer when user enter a nonescape command.
def stop(self): command = input("Enter anything to finish (or 'exit' to cancel)>>>") return command != 'exit'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handleKeyboardInterupt():\n System.stopExecution(TERMINATED_BY_USER)", "def on_KeyboardInterrupt(player):\n print(\"paused by KeyboardInterrupt\")\n player.edit()", "def term():\n curses.endwin()\n unicornhathd.off()", "def stop_timer(self):\r\n self.countdownTimer.stop()", "def cb_...
[ "0.61809236", "0.59877855", "0.590018", "0.5879762", "0.58237314", "0.5802075", "0.57889146", "0.57686067", "0.5766843", "0.57374257", "0.5713238", "0.5673947", "0.5642621", "0.564084", "0.5605514", "0.5603989", "0.55528235", "0.55528235", "0.55528235", "0.55528235", "0.55464...
0.5726188
10
Logs an entry for the homework log using a timer
def do_start(self, input): course_name = course.course_name(input) if course_name in config.current_courses: timer = Timer(course_name) timer.start() if self.stop(): timer.stop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def timer(work_log):\n start = time.time()\n print '\\nyou started working at %s\\n' % time.ctime(int(start))\n\n input = raw_input(\"\\ntype 'stop' to stop timer...\\n\")\n while (input != 'stop'):\n input = raw_input(\"\\ntype 'stop' to stop timer...\\n\")\n work = raw_input(\"\\nwhat'd you...
[ "0.70600265", "0.64849997", "0.64838594", "0.6340308", "0.626186", "0.62565213", "0.6244451", "0.61845344", "0.6172261", "0.61576736", "0.61439645", "0.6137266", "0.61355144", "0.61291295", "0.60836816", "0.60411006", "0.60167503", "0.6007121", "0.59954494", "0.5931569", "0.5...
0.0
-1
Determines if an input is a float.
def is_float(self, input): try: float(input) return True except ValueError: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_float(x):\r\n try:\r\n float(x)\r\n except ValueError:\r\n return False\r\n return True", "def isfloat(value):\r\n try:\r\n float(value)\r\n return True\r\n except ValueError:\r\n return False", "def isFloat(value): \n try:\n float(value...
[ "0.82458997", "0.8190943", "0.81595534", "0.81558645", "0.81085587", "0.8093256", "0.807079", "0.807079", "0.80703425", "0.8061959", "0.8060793", "0.8011374", "0.7838578", "0.7823464", "0.77867234", "0.77559304", "0.7752695", "0.7747797", "0.7746936", "0.77243704", "0.7696662...
0.884747
0
Takes a valid float from the user or the escape command
def hours_studied(self): value = input("Enter value (or 'exit')>>>") while not self.is_float(value): value = input("Enter value (or 'exit')>>>") # Escape command if value == 'exit': return value return float(value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prompt_float_input(prompt_name: str, get_user_input: GetInputFunc) -> float:\n try:\n return float(get_user_input(f\"{prompt_name}:\"))\n except (ValueError, IndexError) as e:\n raise InvalidInput(str(e))", "def get_float(self, prompt=\"> \"):\n\t\twhile True:\n\t\t\tans = raw_input(promp...
[ "0.71381927", "0.7047657", "0.7025061", "0.69696033", "0.67903113", "0.6774239", "0.67697823", "0.66692364", "0.6616977", "0.6591584", "0.656603", "0.65500516", "0.6470003", "0.64432317", "0.64170504", "0.6386314", "0.6362336", "0.6322216", "0.6319651", "0.63095284", "0.62830...
0.6430425
14
Logs an entry for the homework log using a userentered value
def do_record(self, input): course_name = course.course_name(input) if course_name in config.current_courses: hours_studied = self.hours_studied() # Checks for escape command if hours_studied != 'exit': write.to_csv(course_name, hours_studied)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log(exercise):\n global logfile\n msg = raw_input(\"Enter your message. \")\n logfile.write(exercise + \" >>> \" + msg + \"\\n\")", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \...
[ "0.68945855", "0.6559437", "0.6485974", "0.62908894", "0.62812376", "0.6237484", "0.6155005", "0.60995555", "0.60548156", "0.6035026", "0.60128146", "0.59510344", "0.5913017", "0.59052706", "0.5875756", "0.58669865", "0.5864817", "0.5801178", "0.5774821", "0.57367194", "0.569...
0.54031944
54
Given a latitude and longitude calculate distance to airplane including altitude, return kilometers
def distance(self, lat: float, long: float) -> float: # Initial euclidian formula below # diff_lat = self.lat - lat # diff_long = self.long - long # euclidian = math.sqrt((diff_lat ** 2 + diff_long ** 2 + self.altitude ** 2)) return self._haversine(lat, long) + self.altitude / 1000
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_distance_meters(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aL...
[ "0.70389205", "0.6926336", "0.6884272", "0.6884272", "0.6884272", "0.6884272", "0.6884272", "0.68767065", "0.68557805", "0.67505145", "0.67494184", "0.67259705", "0.6697738", "0.6696931", "0.66856134", "0.66700953", "0.65975", "0.6567814", "0.6560275", "0.6525618", "0.6513142...
0.6852247
9
Both paths should be full.
def _put(self, src_fname, dst_fname): logging.info('Transferring file %s to %s', src_fname, self._ip_addr) sftp_cli = self._get_sftp_client() if sftp_cli is None: raise Exception('Not supported without ssh.') return sftp_cli.put(src_fname, dst_fname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __combine_path(self, other):\n self.path = other.path + self.path", "def join(self, path, *paths):", "def aix_path_join(path_one, path_two):\n if path_one.endswith('/'):\n path_one = path_one.rstrip('/')\n\n if path_two.startswith('/'):\n path_two = path_two.lstrip('/')\n\n fi...
[ "0.645393", "0.6182501", "0.61075705", "0.60068315", "0.60003424", "0.5978063", "0.5929083", "0.592156", "0.59077317", "0.59016716", "0.58891624", "0.58888334", "0.584172", "0.5833566", "0.5822319", "0.579614", "0.57439196", "0.5732762", "0.57041806", "0.5696847", "0.5693155"...
0.0
-1
dst_fname should be full path. Creates directories if required.
def put_file(self, src_fname, dst_fname): dst_fname = os.path.normpath(dst_fname) self.mkdirs(os.path.dirname(dst_fname)) self._put(src_fname, dst_fname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prepare_dst_dir(self, dst, src=None, perm=None, **kwargs):\n if self.isdir(dst):\n full_dst = os.path.join(dst, os.path.basename(src)) if src else dst\n\n elif self.isfile(dst):\n full_dst = dst\n\n else:\n # interpret dst as a file name, create missing di...
[ "0.7712215", "0.7621111", "0.6710671", "0.6514752", "0.63302606", "0.62029976", "0.6054697", "0.6051175", "0.6038802", "0.5964814", "0.5951201", "0.59485644", "0.5948254", "0.5946226", "0.59095293", "0.58884376", "0.58736056", "0.58708906", "0.5866591", "0.5849726", "0.584694...
0.71607935
2
gpu_model_to_scale is a dict from model string to scale.
def avail_gpu_compute(self, gpu_model_to_scale): self._check_spy_stats_available() l = [] for u, model in zip(self._util.gpu_compute, self._capacity.gpu_model): found = False for k, scale in gpu_model_to_scale.items(): if k in model: found = True break if found: l.append(scale * (1 - u)) else: raise Exception('Unknown GPU model %s found on host %s' % (model, self.name)) return l
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale_model(model, scale):\n params = model.named_parameters()\n dict_params = dict(params)\n with torch.no_grad():\n for name, param in dict_params.items():\n dict_params[name].set_(dict_params[name].data * scale)", "def scale_model(model,scaleparname='A',scaleval=1):\n model =...
[ "0.67240673", "0.6272403", "0.5994706", "0.5941544", "0.5833461", "0.5681746", "0.5676147", "0.5382901", "0.5348", "0.53371793", "0.5301205", "0.52537143", "0.52262044", "0.51800525", "0.51736313", "0.5139863", "0.512118", "0.5083435", "0.50683033", "0.50677323", "0.50584626"...
0.6472911
1
From all the data, it takes the columns TopicID and Question and for each topic, count the number of+ different SubTopic/Question
def get_data_frame_count_type_of_topic(data_frame: DataFrame) -> pb.DataFrame: try: data_frame = data_frame \ .select("TopicID", "Question") \ .distinct() \ .groupBy("TopicID") \ .count() \ .sort("TopicID") except Py4JError: raise AnalysisException('One columns is incorrect') print("The following table represent the number of the type of each topic") data_frame.show() data_frame_pandas = data_frame.toPandas() return data_frame_pandas
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def topic_count():\n # get the number topics and their counts as tuples: ('Topic', 123)\n query = peewee.RawQuery(Post, \"select topic, count(topic) from post group by topic\").tuples()\n\n # turn the result of the query object into a list of tuples\n tuple_result = []\n for each_tuple in query:\n ...
[ "0.6717123", "0.63702446", "0.61713773", "0.5966875", "0.5961095", "0.5927321", "0.5897049", "0.579996", "0.5752333", "0.5736918", "0.5653994", "0.5621641", "0.55750763", "0.54705304", "0.54477894", "0.54441655", "0.5436638", "0.5420717", "0.5412772", "0.53985864", "0.5367074...
0.62771106
2
Take an specific list from rdd spark, which is formed as list of tuples (Topic, Question)
def get_rdd_count_type_of_topy(rdd: list) -> pb.DataFrame: data_frame_pandas = pb.DataFrame(rdd, columns=['Topic', 'Question']) print(data_frame_pandas) return data_frame_pandas
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_questions_of_topic(topic):\n\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n question_table = dynamodb.Table(\"Questions\")\n\n fe = Attr(\"TopicId\").eq(topic.get(\"TopicId\"))\n response = question_table.scan(FilterExpression=fe)\n questions = ...
[ "0.57467985", "0.5711735", "0.55085766", "0.5506645", "0.53411525", "0.5329091", "0.53050333", "0.52999526", "0.524876", "0.5236957", "0.5155814", "0.51508963", "0.514394", "0.51368946", "0.5046468", "0.50216925", "0.5004249", "0.4996361", "0.49733895", "0.49623284", "0.49622...
0.5424828
4
From all the data, it takes the columns TopicID, and count the topic based on the gender
def get_data_frame_count_male_gender_by_topic(data_frame: DataFrame) -> pb.DataFrame: data_frame_topic = data_frame \ .filter(data_frame["Stratification1"].contains("Male")) \ .distinct() \ .groupBy("TopicID") \ .count() \ .sort("TopicID") print("The following table represent the number of men group by the topic: ") data_frame_topic.show() data_frame_pandas = data_frame.toPandas() return data_frame_pandas
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_male_female_topicsDF(data_dict, gender):\n dataDF = pd.DataFrame.from_dict(data_dict[gender], orient='index')\n outlet_gender_topicsDF = pd.json_normalize(dataDF['topic_mean'])\n outlet_gender_topicsDF.index = dataDF.index\n outlet_gender_topicsDF = outlet_gender_topicsDF.sort_index()\n outl...
[ "0.6474987", "0.63504976", "0.6320572", "0.62746453", "0.627228", "0.6258495", "0.61637425", "0.6114557", "0.5977991", "0.58644605", "0.5836324", "0.5831244", "0.5673213", "0.5647648", "0.5610889", "0.5574155", "0.5492381", "0.54264843", "0.53719985", "0.53717935", "0.5295619...
0.80123305
0
From all the data, it takes the columns TopicID, and count the topic based on the ethnicity
def get_data_frame_count_black_ethnicity_by_topic(data_frame: DataFrame) -> pb.DataFrame: data_frame_topic = data_frame \ .filter(data_frame["Stratification1"].contains("Black, non-Hispanic")) \ .distinct() \ .groupBy("TopicID") \ .count() \ .sort("TopicID") print("The following table represent the number of black ethnicity people group by the topic: ") data_frame_topic.show() data_frame_pandas = data_frame.toPandas() return data_frame_pandas
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data_frame_count_male_gender_by_topic(data_frame: DataFrame) -> pb.DataFrame:\n data_frame_topic = data_frame \\\n .filter(data_frame[\"Stratification1\"].contains(\"Male\")) \\\n .distinct() \\\n .groupBy(\"TopicID\") \\\n .count() \\\n .sort(\"TopicID\")\n\n print...
[ "0.670414", "0.6682198", "0.6473323", "0.63052845", "0.6284073", "0.5883223", "0.58540165", "0.5768975", "0.56314296", "0.5595589", "0.55580306", "0.5541051", "0.5484164", "0.54825205", "0.5471465", "0.5443077", "0.53968257", "0.5374345", "0.5362987", "0.5333066", "0.5320533"...
0.7382751
0
Plot a data frame with bar type
def plot_type_of_topic(data_frame: pb.DataFrame) -> None: plt.interactive(False) plt.figure() data_frame.plot(kind='bar', x= data_frame['TopicID']) plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compte(df):\n\n df.value_counts()[:100].plot(kind='bar')\n plt.show()", "def bar_plot(df, data_pt):\n \n x=df.loc[data_pt]\n y= df.columns.tolist()\n sorte=x.tolist()\n a=sorted(zip(sorte, y))[-10:]\n y=[y for _, y in a]\n ## soru burda yapıp altı ona göre duzeliyecegim birde\n ...
[ "0.72341543", "0.7051117", "0.7032588", "0.702026", "0.6910697", "0.6906154", "0.68671536", "0.67882943", "0.6782081", "0.6735545", "0.6699668", "0.6685477", "0.66498506", "0.663799", "0.6616477", "0.6604195", "0.6479067", "0.6469803", "0.64694345", "0.64263964", "0.6424513",...
0.7253609
0
Plot a data frame with bar type
def plot_type_of_two_topic(data_frame1: pb.DataFrame, data_frame2: pb.DataFrame) -> None: plt.interactive(False) plt.figure() data_frame1.plot(kind='bar', x= data_frame['TopicID']) data_frame2.plot(kind='bar', x= data_frame['TopicID']) plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_type_of_topic(data_frame: pb.DataFrame) -> None:\n plt.interactive(False)\n plt.figure()\n data_frame.plot(kind='bar', x= data_frame['TopicID'])\n plt.show()", "def compte(df):\n\n df.value_counts()[:100].plot(kind='bar')\n plt.show()", "def bar_plot(df, data_pt):\n \n x=df.loc...
[ "0.7253609", "0.72341543", "0.7051117", "0.7032588", "0.702026", "0.6910697", "0.6906154", "0.68671536", "0.67882943", "0.6782081", "0.6735545", "0.6699668", "0.6685477", "0.66498506", "0.663799", "0.6616477", "0.6604195", "0.6479067", "0.6469803", "0.64694345", "0.64263964",...
0.60265684
56
Return the project name when printed.
def __str__(self): return self.project_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def project_name(self):\n pass", "def getProjectName():", "def full_name(self):\n if not self.project_id:\n raise ValueError('Missing project ID.')\n return 'projects/%s' % (self.project_id)", "def get_project_name(self):\n return self.line_edit.text()", "def project(...
[ "0.8392448", "0.82995224", "0.7878728", "0.78781843", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.76813203", "0.76813203", "0.76813203", "0.76813203", "0.76813203", "0.76813203", "0...
0.77389073
5
Return the skill name.
def __str__(self): return self.skill
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def skill(self):\n return self._get(\"skill\")", "def test_get_skill_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy.name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n ...
[ "0.77862394", "0.7630785", "0.7034226", "0.6951422", "0.67984796", "0.6626635", "0.66073817", "0.6603298", "0.6590914", "0.6590914", "0.6590914", "0.6585181", "0.6584061", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", ...
0.702125
3
Returns the account for the given client. If it does not exist a new one is created and returned
def get_account(self, client: int): try: return self.accounts[client] except KeyError: return self._create_account(client)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_client(self, clientname):\n client = self.dbsession.query(Client).filter_by(clientname=clientname).all()\n if not client:\n return self.create_client({'clientname': clientname})\n else:\n return client[0]", "def get_client(self, user_id: int, client_name: str) -...
[ "0.66133344", "0.6345957", "0.6296915", "0.62183553", "0.6195077", "0.61447585", "0.6143067", "0.61124474", "0.6101005", "0.6082482", "0.6080354", "0.60321856", "0.60321856", "0.60195756", "0.5999535", "0.59913605", "0.5953246", "0.5937958", "0.5926711", "0.59228164", "0.5893...
0.9063625
0
Write a volume to a file path.
def write(img, path): create_directories_for_file_name(path) writer = sitk.ImageFileWriter() writer.Execute(img, path, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, path, **kwargs):\n client = self.connect(VAULT_TOKEN)\n client.write(path, **kwargs)", "def write_to_path(self, path):\n assert not path.exists()\n fout = path.open(\"wb\")\n fout.write(self.to_string())\n assert not fout.close()\n path.setdata()",...
[ "0.6251699", "0.60698074", "0.60018075", "0.5975603", "0.5835609", "0.5832426", "0.5821404", "0.5796797", "0.5752071", "0.5750823", "0.57148486", "0.5701186", "0.5690606", "0.56724447", "0.565024", "0.56472427", "0.5638502", "0.56315726", "0.56258756", "0.5602037", "0.5573271...
0.5269131
62
r""" Calculates precipitable water (cm) from ambient air temperature (C) and relatively humidity (%) using an empirical model. The accuracy of this method is approximately 20% for moderate PW (13 cm) and less accurate otherwise.
def gueymard94_pw(temp_air, relative_humidity): T = temp_air + 273.15 # Convert to Kelvin # noqa: N806 RH = relative_humidity # noqa: N806 theta = T / 273.15 # Eq. 1 from Keogh and Blakers pw = ( 0.1 * (0.4976 + 1.5265*theta + np.exp(13.6897*theta - 14.9188*(theta)**3)) * (216.7*RH/(100*T)*np.exp(22.330 - 49.140*(100/T) - 10.922*(100/T)**2 - 0.39015*T/100))) pw = np.maximum(pw, 0.1) return pw
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_compensated_temperature() -> float:\n comp_factor = 2.25\n cpu_temp = get_cpu_temperature()\n raw_temp = bme280.get_temperature()\n comp_temp = raw_temp - ((cpu_temp - raw_temp) / comp_factor)\n # print(\"\"\"\n # Compensated_Temperature: {:05.2f} *C\n # Pressure: {:05.2f} hPa\n # R...
[ "0.6271697", "0.61329305", "0.61147255", "0.60481554", "0.5957173", "0.59200716", "0.58812517", "0.5873447", "0.5873447", "0.58698595", "0.58185816", "0.57858187", "0.5785377", "0.572641", "0.56684095", "0.56592894", "0.56545794", "0.56407183", "0.5634677", "0.56337875", "0.5...
0.542909
35
Approximate broadband aerosol optical depth. Bird and Hulstrom developed a correlation for broadband aerosol optical depth (AOD) using two wavelengths, 380 nm and 500 nm.
def bird_hulstrom80_aod_bb(aod380, aod500): # approximate broadband AOD using (Bird-Hulstrom 1980) return 0.27583 * aod380 + 0.35 * aod500
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_blue_haze_TOA(d,sza,L):\n rho_blue_TOA=np.pi*(d**2)*L[:,5,:]/(2.16*np.cos(sza))\n return rho_blue_TOA", "def ead(o2, depth):\n fraction_o2 = percentage_to_fraction(o2)\n fraction_n2 = 1.0 - fraction_o2\n return math.ceil(((depth + 10.0) * (fraction_n2 / 0.79)) - 10.0)", "def optica...
[ "0.5567054", "0.55453414", "0.5525464", "0.5403252", "0.539342", "0.53296256", "0.53177613", "0.52844834", "0.52782506", "0.524908", "0.52308244", "0.52085507", "0.5208206", "0.5199287", "0.5178122", "0.51560545", "0.51540154", "0.51226914", "0.51127833", "0.5078825", "0.5057...
0.53029495
7
Calculate Linke turbidity using Kasten pyrheliometric formula. Note that broadband aerosol optical depth (AOD) can be approximated by AOD measured at 700 nm according to Molineaux [4] . Bird and Hulstrom offer an alternate approximation using AOD measured at 380 nm and 500 nm. Based on original implementation by Armel Oumbe.
def kasten96_lt(airmass_absolute, precipitable_water, aod_bb): # "From numerically integrated spectral simulations done with Modtran # (Berk, 1989), Molineaux (1998) obtained for the broadband optical depth # of a clean and dry atmospshere (fictitious atmosphere that comprises only # the effects of Rayleigh scattering and absorption by the atmosphere gases # other than the water vapor) the following expression" # - P. Ineichen (2008) delta_cda = -0.101 + 0.235 * airmass_absolute ** (-0.16) # "and the broadband water vapor optical depth where pwat is the integrated # precipitable water vapor content of the atmosphere expressed in cm and am # the optical air mass. The precision of these fits is better than 1% when # compared with Modtran simulations in the range 1 < am < 5 and # 0 < pwat < 5 cm at sea level" - P. Ineichen (2008) delta_w = 0.112 * airmass_absolute ** (-0.55) * precipitable_water ** 0.34 # broadband AOD delta_a = aod_bb # "Then using the Kasten pyrheliometric formula (1980, 1996), the Linke # turbidity at am = 2 can be written. The extension of the Linke turbidity # coefficient to other values of air mass was published by Ineichen and # Perez (2002)" - P. Ineichen (2008) lt = -(9.4 + 0.9 * airmass_absolute) * np.log( np.exp(-airmass_absolute * (delta_cda + delta_w + delta_a)) ) / airmass_absolute # filter out of extrapolated values return lt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def estimate_lwdown(tairK, rh):\n zeroC = 273.15\n\n sat_vapress = 611.2 * np.exp(17.67 * ((tairK - zeroC) / (tairK - 29.65)))\n vapress = np.maximum(5.0, rh) / 100. * sat_vapress\n lw_down = 2.648 * tairK + 0.0346 * vapress - 474.0\n\n return lw_down", "def derive_RiekeLebofsky(wavelength):\n ...
[ "0.6074756", "0.60489684", "0.6021683", "0.58620036", "0.5819184", "0.5817391", "0.5701168", "0.56542647", "0.5622918", "0.5620226", "0.5618999", "0.55917037", "0.55595964", "0.5550017", "0.5537136", "0.5530169", "0.5526478", "0.5487843", "0.5477624", "0.5460396", "0.54503703...
0.64853966
0
r""" Get AOD at specified wavelength using Angstrom turbidity model.
def angstrom_aod_at_lambda(aod0, lambda0, alpha=1.14, lambda1=700.0): return aod0 * ((lambda1 / lambda0) ** (-alpha))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_experimental_spectra(mol):\n\n data = pd.read_csv(mol, sep=',')\n wavelength = data.values[:, 0]\n\n absorption = data.values[:, 1]\n\n func = interp1d(wavelength, absorption, kind='quadratic')\n wavelength_new = 1. / np.linspace(1. / wavelength.max(), 1. / wavelength.min(), 100)\n absorp...
[ "0.5712585", "0.5581919", "0.5486108", "0.53831595", "0.53533727", "0.53258014", "0.5305268", "0.526969", "0.52618587", "0.52508855", "0.52357584", "0.51796144", "0.516192", "0.5157981", "0.5145479", "0.5129892", "0.50970304", "0.50838137", "0.5080797", "0.5045471", "0.504416...
0.0
-1
r""" Calculate Angstrom alpha exponent.
def angstrom_alpha(aod1, lambda1, aod2, lambda2): return - np.log(aod1 / aod2) / np.log(lambda1 / lambda2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_exponent():\n pass", "def powerlaw(E,alpha,A):\n\n\treturn A*E**alpha", "def calc_alpha(epsilon): \n return float(0.5 * np.log((1-epsilon)/epsilon))", "def encode_exponent(e: int) -> int:\n assert 0 <= e <= MAX_EXPONENT\n return DECODING_TABLE[e]", "def test_exp_decay(self, alp...
[ "0.71915054", "0.6592303", "0.64282143", "0.6347429", "0.63466847", "0.63178813", "0.63117254", "0.6303169", "0.6258958", "0.62512463", "0.6246484", "0.62283045", "0.62211376", "0.62090594", "0.61820316", "0.6104411", "0.61022353", "0.608179", "0.60706884", "0.60041946", "0.5...
0.56222945
50
Given a file name for baby.html, returns a list starting with the year string followed by the namerank strings in alphabetical order. ['2006', 'Aaliyah 91', Aaron 57', 'Abagail 895', ' ...]
def extract_names(filename): f = open(filename,'rU') name_data = f.read() year_data= re.search(r'Popularity\sin\s(\d\d\d\d)', name_data) if not year_data : print ' no year found ' sys.exit(1) name_year=year_data.group(1) #print 'year :' #print name_year tuples=re.findall(r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>',name_data) #print 'tuples' #print tuples dict_name = {} for a,b,c in tuples : #print a + ' boy name: ' + b + ' , girl name : ' + c if b not in dict_name : dict_name[b] = a if c not in dict_name : dict_name[c] = a #print dict_name lst_names = sorted(dict_name.keys()) result_names_sorted = [] result_names_sorted.append(name_year) for name in lst_names : #print name + " : " + dict_name[name] result_names_sorted.append(name + ' ' + dict_name[name]) #print result_names_sorted return result_names_sorted
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_names(filename):\n\n # Extracting the year\n year_match = re.search(r'\\d\\d\\d\\d', filename)\n if not year_match:\n sys.stderr.write('Could not find a year!\\n')\n sys.exit()\n year = year_match.group()\n\n # Opening the file\n try:\n with open(filename) as file...
[ "0.7748594", "0.74819154", "0.73469853", "0.7098531", "0.70823675", "0.6961224", "0.67496955", "0.6549014", "0.6204802", "0.6009291", "0.5994891", "0.59557843", "0.59324557", "0.591772", "0.5879463", "0.5868835", "0.5803098", "0.5757769", "0.57187426", "0.57022166", "0.569899...
0.7415442
2
input h (meters) and the coefficients for the linear profile for the free troposphere theta (ft_intercept (K) and slope gamma (K/m)) return the free tropospher theta at height h
def theta_ft(h,ft_intercept,gamma): theta_top = ft_intercept + h*gamma return theta_top
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_theta_surf_hex_h(theta_hs_in_h: float, theta_hs_out_h: float, v_hs: float) -> float:\n\n c = get_specific_heat()\n rho = get_air_density()\n\n # sensible heating capacity of heat source for heating, W\n q_hs_h = (theta_hs_out_h - theta_hs_in_h) * c * rho * v_hs / 3600\n\n # sensible heat tra...
[ "0.5862929", "0.5850773", "0.57733494", "0.5686483", "0.56282175", "0.5592743", "0.55332625", "0.55067706", "0.55043215", "0.5491298", "0.5484844", "0.5470747", "0.5449616", "0.5431975", "0.5429191", "0.5417069", "0.54054326", "0.5379685", "0.53791565", "0.5375629", "0.536189...
0.75179046
0
the_vars[0]= thetabar the_vars[1] = h the_vars[2] = qv surface flux from drag law with subsidence and diagnosed deltheta
def dmixed_vars(the_vars,tstep,coeffs): deltheta = theta_ft(the_vars[1],coeffs.ft_intercept,coeffs.ft_gamma) - the_vars[0] F0 = coeffs.U*coeffs.Cd*(coeffs.sst - the_vars[0]) #surface heat flux Fqv0 = coeffs.U*coeffs.Cd*(coeffs.qsfc - the_vars[2]) #surface vapor flux Fint = -coeffs.k*F0 #entrainment heat flux if coeffs.use_NT: # use NT parameterization by calculating we using function went = calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0) # Nicholls-Turton parameterization else: # use simple we parameterization went = -Fint/deltheta #simple entrainment parameterization # calculate delta_Fr delta_Frstar = 82.0 # Wm^-2 Frlambda = 7.9 # Wm^-2, using with CTL from Gesso delta_Fr = delta_Frstar - Frlambda*coeffs.ft_qv*1000 # convert qt_ft to g kg^-1 Fqvent = -went*( coeffs.ft_qv - the_vars[2]) wsubs = -coeffs.D*the_vars[1] rho=1. cp=1004. derivs=np.empty_like(the_vars) # higher delta_Fr from drier air at mixed-layer top...hence cloudy air results in less radiative cooling derivs[0]=(F0 - Fint)/(the_vars[1]*rho) - delta_Fr/1004./the_vars[1] derivs[1] = went + wsubs derivs[2] = (Fqv0 - Fqvent)/the_vars[1] return derivs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ha(env, cstate=0):\n T1 = 10\n T2 = 10\n thM = 20\n thm = 5\n vr = 10.5\n v1 = -1.3\n v2 = -2.7\n assert(T1 == T2)\n\n delta = None # None to cause failure\n # The continous variables used in this ha\n x = T1 # clock1 variable\n y = T2 ...
[ "0.6345493", "0.6289429", "0.6155934", "0.6074104", "0.60056967", "0.5953673", "0.59231526", "0.5861008", "0.5852607", "0.5842901", "0.58290935", "0.58032346", "0.57665646", "0.576412", "0.57412446", "0.57161164", "0.5708992", "0.5680603", "0.56741995", "0.5667289", "0.565103...
0.7249938
0
NichollsTurton entrainment parameterization the_vars and coeffs are inputs into dmixed_vars deltheta, F0, Fqv0 are calculated in dmixed_vars
def calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0): thetal_m = the_vars[0] qt_m = the_vars[2] zi = the_vars[1] dth = deltheta thetal_ft = thetal_m + dth qt_ft = coeffs.ft_qv dqt = qt_ft - qt_m # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt) gamma = 6e-3 thetal_3000 = thetal_ft + gamma*(3000-zi) LTS = thetal_3000 - coeffs.sst # lower tropospheric stability # calculate coefficients press=tf.find_press(zi) Ad,Bd,issat = tf.calc_ABcoeffs(thetal_ft,qt_ft,press) Aw,Bw,issat = tf.calc_ABcoeffs(thetal_m,qt_m,press) invert= tf.t_uos_thetal(thetal_m,qt_m,press) T_0 = invert.temp lv=tf.L_t(invert.temp) Cl = (Ad*lv/tc.CPD - T_0/tc.EPS) del_thv_dry = Ad * dth + Bd * dqt del_thv_sat = Aw * dth + Bw * dqt # account for evaporative cooling (increases we) ql_max = invert.ql Cl = (Ad*lv/tc.CPD - T_0/tc.EPS) Del_thv = del_thv_dry - Cl * ql_max # calculate buoyancy integral terms rho = 1. lcl_press=tf.LCL_thetal(thetal_m,qt_m) zb=tf.find_height(lcl_press) T1 = zb/zi T2 = 0.5 * zb**2 / zi**2 T3 = (zi-zb)/zi T4 = 0.5 * (zi**2 - zb**2) / zi**2 # calculate delta_Fr delta_Frstar = 82.0 # Wm^-2 Frlambda = 7.9 # Wm^-2, using with CTL from Gesso delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1 wtl_0=F0 wqt_0=Fqv0 Del_F = delta_Fr/(tc.CPD*rho) # use sensitivity to radiation a la Gesso Fig. 3 term1 = wtl_0 * (Ad * (T1-T2) + Aw * (T3-T4)) term2 = wqt_0 * (Bd * (T1-T2) + Bw * (T3-T4)) term3 = Del_F * (Ad * T2 + Aw * T4) Theta_NE = term1 + term2 + term3 # calculate w* wstar=(2.5*9.8/T_0*zi*Theta_NE)**(1/3.) # calculate chi* chi_star = Cl * ql_max / (del_thv_dry - del_thv_sat) # calculate del_m Del_m = del_thv_dry + chi_star * (2. - chi_star) * (del_thv_sat - del_thv_dry) # calculate we a2=15. Del_thv_NT = Del_thv / (1. + a2 * (1. - Del_m/Del_thv)) A_NT = 0.2 fac_NT = 2.5 term4 = Del_thv_NT term5 = A_NT * fac_NT * (T2 * del_thv_dry + T4 * del_thv_sat) denominator = term4 + term5 we = A_NT * fac_NT * Theta_NE / denominator return we
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dmixed_vars(the_vars,tstep,coeffs):\n\n deltheta = theta_ft(the_vars[1],coeffs.ft_intercept,coeffs.ft_gamma) - the_vars[0]\n F0 = coeffs.U*coeffs.Cd*(coeffs.sst - the_vars[0]) #surface heat flux\n Fqv0 = coeffs.U*coeffs.Cd*(coeffs.qsfc - the_vars[2]) #surface vapor flux\n Fint = -coeffs.k*F0 #en...
[ "0.80656624", "0.5865987", "0.5693037", "0.5615564", "0.55575585", "0.55294347", "0.55230105", "0.5521183", "0.5503675", "0.5450836", "0.540424", "0.53514105", "0.5325137", "0.53189623", "0.5317447", "0.5312849", "0.5308044", "0.5304588", "0.530239", "0.5301694", "0.5296928",...
0.73483014
1
find the lcl (in m) for a row in the dataframe
def calc_lcl(row,psfc): Tdew = tf.tmr(row['qv'],psfc) LCL = tf.LCL(Tdew,row['theta'],psfc) #kPa # # rough approximation: 10 kPa = 1 km # delp=psfc - LCL lcl_h = delp*100. return lcl_h
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_index_lm(l, m):\n return (l+1)**2 -1 -l + m", "def compute_kl(self, df):\n value_counts = [df[col].value_counts() for col in self.hist_cols]\n next_hists = self.value_counts_to_hists(value_counts)\n\n if self.prev_hists is None:\n self.prev_hists = next_hists\n ...
[ "0.59105355", "0.57944614", "0.5768694", "0.5714691", "0.5706475", "0.5483352", "0.5476161", "0.5426626", "0.53980875", "0.5363862", "0.53620285", "0.5346637", "0.530857", "0.5301486", "0.52655256", "0.52437156", "0.52226084", "0.52145016", "0.5176464", "0.51718956", "0.51507...
0.60421354
0
Adapted from interactive_vaporflux.ipynb sst, sea surface temperature (K) ft_qv, mixedlayer top qv (kg kg^1) use_NT, True or False outputs csv and json files with equilibrium values
def run_main(sst, ft_qv, use_NT): dtout=10. #minutes end_time=8*24. #hours del_time=dtout*60. #seconds end_time=end_time*3600. #seconds #sst=297 D=5.e-6 #s-1 U=7 #m/s psfc=100. #kPa qsfc=tf.qs_tp(sst,psfc) ft_intercept = 292 #K ft_gamma = 6.e-3 #K/m #ft_qv = 2.e-3 k=0.2 #entrainment efficiency Cd = 1.e-3 #drag coefficient tspan = np.arange(0.,end_time,del_time) vars_init=[285.,400.,8.e-3] #theta (K), height (m) qv (kg/kg) to start the_tup=dict(D=D,U=U,sst=sst,ft_intercept=ft_intercept,ft_gamma=ft_gamma, qsfc=qsfc,ft_qv=ft_qv,k=k,Cd=Cd,radcool=30.,use_NT=use_NT) # include use_NT the_tup=make_tuple(the_tup,'coeffs') output=integrate.odeint(dmixed_vars, vars_init, tspan,(the_tup,)) result=pd.DataFrame.from_records(output,columns=['theta','h','qv']) # save time/computation by only doing calculations for the last timestep (equilibrium) result['time']=tspan[-1]/3600./24. #days result['deltheta'] = theta_ft(result['h'].values[-1],ft_intercept,ft_gamma) - result['theta'].iloc[-1] result['delqv'] = ft_qv - result['qv'].iloc[-1] result['LCL'] = calc_lcl(result.iloc[-1], psfc) result['q_flux_0']=calc_sfc_qvap_flux(result.iloc[-1],the_tup) result['T_flux_0']=calc_sfc_theta_flux(result.iloc[-1],the_tup) result['entflux_theta']=calc_entflux_theta(result.iloc[-1],the_tup) # decide how to calculate entrainment the_vars = [result['theta'].iloc[-1],result['h'].iloc[-1],result['qv'].iloc[-1]] if use_NT: result['went']=calc_went_NT(the_vars, the_tup, result['deltheta'].iloc[-1], result['T_flux_0'].iloc[-1], result['q_flux_0'].iloc[-1]) else: result['went']=calc_went(result.iloc[-1],the_tup) result['entflux_qv']=calc_entflux_qv(result.iloc[-1],the_tup) with open('dumpmodel.csv','w') as f: result.to_csv(f,index=False) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_equil(sst, ft_qv, use_NT=False):\n \n run_main(sst, ft_qv, use_NT)\n \n # grab csv file\n with open('dumpmodel.csv','r') as f:\n df_result=pd.read_csv(f)\n\n # last time step into named tupple\n out=df_result.iloc[-1]\n steady_state=make_tuple(out.to_dict())\n steady_stat...
[ "0.6243293", "0.5701022", "0.5552244", "0.5491569", "0.5487227", "0.5479191", "0.54358137", "0.54272795", "0.5394585", "0.5392419", "0.53615534", "0.533306", "0.53179353", "0.5315923", "0.5295476", "0.5265205", "0.52420974", "0.5239961", "0.52259064", "0.52226466", "0.5221569...
0.64122236
0